/*
 * SMPFunction.cpp - <see below>.
 *
 * Copyright (c) 2000, 2001, 2010 - University of Virginia 
 *
 * This file is part of the Memory Error Detection System (MEDS) infrastructure.
 * This file may be used and modified for non-commercial purposes as long as 
 * all copyright, permission, and nonwarranty notices are preserved.  
 * Redistribution is prohibited without prior written consent from the University 
 * of Virginia.
 *
 * Please contact the authors for restrictions applying to commercial use.
 *
 * THIS SOURCE IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Author: University of Virginia
 * e-mail: jwd@virginia.com
 * URL   : http://www.cs.virginia.edu/
 *
 * Additional copyrights 2010, 2011, 2012, 2013, 2014, 2015 by Zephyr Software LLC
 * e-mail: {clc,jwd}@zephyr-software.com
 * URL   : http://www.zephyr-software.com/
 *
 */

//
// SMPFunction.cpp
//
// This module performs the fundamental data flow analyses needed for the
//   SMP project (Software Memory Protection) at the function level.
//

using namespace std;

#include <utility>
#include <list>
#include <set>
#include <vector>
#include <iterator>
#include <algorithm>
#include <iostream>
#include <sstream>
#include <iomanip>

#include <cstring>
#include <cstdint>
#include <cstdlib>
#include <cassert>

#include "interfaces/SMPDBInterface.h"
#include "base/SMPDataFlowAnalysis.h"
#include "base/SMPFunction.h"
#include "base/SMPBasicBlock.h"
#include "base/SMPInstr.h"
#include "base/SMPProgram.h"

// Set to 1 for debugging output
#define SMP_DEBUG 1
#define SMP_DEBUG2 0   // verbose
#define SMP_DEBUG3 0   // verbose
#define SMP_DEBUG_CONTROLFLOW 0  // tells what processing stage is entered
#define SMP_DEBUG_XOR 0
#define SMP_DEBUG_CHUNKS 1  // tracking down tail chunks for functions
#define SMP_DEBUG_FRAMEFIXUP 1
#define SMP_DEBUG_FRAMEFIXUP_VERBOSE 0
#define SMP_DEBUG_DATAFLOW 0
#define SMP_DEBUG_DATAFLOW_VERBOSE 0
#define SMP_DEBUG_TYPE_INFERENCE 0
#define SMP_DEBUG_PROFILED_TYPE_INFERENCE 0
#define SMP_DEBUG_FUNC 0
#define SMP_DEBUG_FUNC_SAFETY 1
#define SMP_VERBOSE_DEBUG_FUNC 0
#define SMP_DEBUG_BUILD_RTL 1   // leave this on; serious errors reported
#define SMP_DEBUG_UNINITIALIZED_SSA_NAMES 1
#define SMP_WARN_UNUSED_DEFS 0
#define SMP_DEBUG_SWITCH_TABLE_INFO 0
#define SMP_OPTIMIZE_BLOCK_PROFILING 0
#define SMP_AUDIT_STACK_POINTER_DELTAS 0
#define SMP_COMPARE_IDA_STARS_STACK_POINTER_DELTAS 1
#define STARS_AGGRESSIVE_SIGNEDNESS_PROPAGATION 1
#define STARS_BUILD_LOOP_BITSET 1  // Build bitset in this->FuncLoopsByBlock
#define STARS_DEBUG_FUNC_SCCP 0
#define STARS_DEBUG_FUNC_SCCP_VERBOSE 1
#define STARS_DEBUG_LOOP_INVARIANTS 0
#define STARS_DEBUG_FPTR_SHADOW_LIST 0
#define SPARK_EMIT_UNIV_QUANTIFIER_PRECONDITIONS 0  // Use for-all-i form of restrictions on memory ranges

// For debugging purposes, only emit SPARK Ada for main().
#define STARS_EMIT_ADA_FOR_MAIN_ONLY 0

// Compute LVA/SSA or not? Turn it off for NICECAP demo on 31-JAN-2008
#define SMP_COMPUTE_LVA_SSA 1

// Use conditional type propagation on phi functions
#define SMP_CONDITIONAL_TYPE_PROPAGATION 1

// Kludges to fix IDA Pro 5.2 errors in cc1.ncexe
#define SMP_IDAPRO52_WORKAROUND 0

// Basic block number 0 is the top of the CFG.
#define SMP_TOP_BLOCK 0 

// Set SharedTailChunks to TRUE for entire printf family
//  After we restructure the parent/tail structure of the database, this
//  will go away.
#define KLUDGE_VFPRINTF_FAMILY 0

// C99 standard permits up to 127 arguments to a single function,
//  but FORTRAN has no limit. We have encountered 290 in practice.
#define STARS_MAX_ARGS_PER_FUNC 512


#if 0
// moved to idapro/STARSFunction.cpp 
// Used for binary search by function number in SMPStaticAnalyzer.cpp
//  to trigger debugging output and find which instruction in which
//  function is causing a crash.
bool SMPBinaryDebug = false;
#endif

using namespace std;


// Helper function to determine if an item occurs in a vector.
// item: value to search for; vec: vector to search.
// Returns true if any element of vec compares equal to item.
template <class T>
bool vector_exists(const T &item, const std::vector<T> &vec) {
	// Delegate the linear scan to the standard library instead of a hand-rolled loop.
	return (std::find(vec.begin(), vec.end(), item) != vec.end());
}

// Comparison predicate for sorting: order local variables by ascending frame offset.
bool LocalVarCompare(const LocalVar &LV1, const LocalVar &LV2) {
	return LV1.offset < LV2.offset;
}

// Are the operands, SSA numbers, and SubtractAddend fields identical?
// Short-circuits in the same order as the original nested checks:
//  SSA number and add/subtract flag first, then the induction-var operand,
//  then the addend and multiplier operands.
bool EqualInductionVars(const InductionVarTriple &IV1, const InductionVarTriple &IV2) {
	return (IV1.InductionVar.GetSSANum() == IV2.InductionVar.GetSSANum())
		&& (IV1.SubtractAddend == IV2.SubtractAddend)
		&& IsEqOp(IV1.InductionVar.GetOp(), IV2.InductionVar.GetOp())
		&& IsEqOp(IV1.Addend.GetOp(), IV2.Addend.GetOp())
		&& IsEqOp(IV1.Multiplier.GetOp(), IV2.Multiplier.GetOp());
} // end of EqualInductionVars()

// Debug dump of one induction variable triple: multiplier, induction
//  reference, add/subtract operator, and addend.
void DumpInductionVar(const struct InductionVarTriple IndVar) {
	SMP_msg("InductionVar: Multiplier: ");
	IndVar.Multiplier.Dump();
	SMP_msg(" Induction Ref: ");
	IndVar.InductionVar.Dump();
	// Operator depends on whether the addend is subtracted or added.
	SMP_msg(IndVar.SubtractAddend ? " AddSubOperator: - " : " AddSubOperator: + ");
	SMP_msg(" Induction Addend: ");
	IndVar.Addend.Dump();
	SMP_msg("\n");
} // end of DumpInductionVar()

// Debug dump of InductionVarFamily.
void DumpInductionVarFamily(const struct InductionVarFamily IVFamily) {
	SMP_msg(" BIVIncomingSSA: %d BIVIncomingDefAddr: %llx BIVInsideLoopDefAddrs:", 
		IVFamily.BIVIncomingSSANum, (uint64_t) IVFamily.BIVIncomingDefAddr);
	for (vector<STARS_ea_t>::const_iterator AddrIter = IVFamily.BIVInsideLoopDefAddrs.cbegin();
		AddrIter != IVFamily.BIVInsideLoopDefAddrs.cend();
		++AddrIter) {
		SMP_msg(" %llx", (uint64_t) (*AddrIter));
	}
	SMP_msg(" BIV: ");
	DumpInductionVar(IVFamily.BasicInductionVar);
	for (size_t index = 0; index < IVFamily.DependentInductionVars.size(); ++index) {
		struct DependentInductionVar DIV = IVFamily.DependentInductionVars[index];
		SMP_msg("DIVDefAddr: %llx DIVRef: ", (uint64_t) DIV.DIVDefAddr);
		DIV.DIV.Dump();
		SMP_msg(" DIV equation: ");
		DumpInductionVar(DIV.IVExpr);
	}
} // end of DumpInductionVarFamily()

// Determine whether we have an incrementing or decrementing loop based on
//  the BasicInductionVar. Defaults to incrementing when the addend is not
//  an immediate operand.
bool IsPositiveIncrementBIV(const struct InductionVarTriple BIV) {
	bool IncrementsUpward = true; // default
	STARSOpndTypePtr AddendOp = BIV.Addend.GetOp();
	if (AddendOp->IsImmedOp()) {
		// Reinterpret the unsigned immediate as signed to detect negative addends.
		int SignedAddend = (int) AddendOp->GetImmedValue();
		// Subtracting a negative addend (or adding a positive one) moves the BIV upward.
		IncrementsUpward = (SignedAddend < 0) ? BIV.SubtractAddend : (!BIV.SubtractAddend);
	}
	return IncrementsUpward;
} // end of IsPositiveIncrementBIV()

// *****************************************************************
// Class SMPFunction
// *****************************************************************

// Constructor.
// Info: function descriptor from the disassembler layer for this function.
// pgm: the program-level container that owns this function.
// Initializes every analysis flag, stack-frame statistic, and container to a
//  pre-analysis default; the real values are filled in later by the various
//  analysis phases.
SMPFunction::SMPFunction(STARS_Function_t *Info, SMPProgram* pgm) {
	this->Program = pgm;
	this->FuncInfo = Info;
	this->FirstEA = this->FuncInfo->get_startEA();
	this->BlockCount = 0;
	this->LoopCount = 0;
	this->FuncProcessed = false;
	// Function attribute flags all start false/unknown until analyses run.
	this->UseFP = false;
	this->StaticFunc = false;
	this->LibFunc = false;
	this->HasReturnInst = false;
	this->IndirectCalls = false;
	this->UnresolvedIndirectCalls = false;
	this->IndirectJumps = false;
	this->UnresolvedIndirectJumps = false;
	this->SystemCalls = false;
	this->DirectlyRecursive = false;
	this->MutuallyRecursive = false;
//	this->SetSharedChunks(false);
	this->UnsharedChunks = false;
	this->MultipleEntryPoints = false;
	this->CalledFromOrphanCode = false;
	this->TailCallChainFromOrphanCode = false;
	this->CallsNonReturningFunc = false;
	this->CalleeChainCallsNonReturningFunc = false;
	this->CallsAlloca = false;
	this->PushAfterLocalVarAlloc = false;
	this->LinkerStub = false;
	this->ThunkFunc = false;
	this->AnalyzedSP = false;
	this->STARSStackPtrAnalysisPerformed = false;
	this->StackAdjustmentComputed = false;
	this->BuiltRTLs = false;
	this->HasReducibleCFG = false;
	this->HasStructuredCFG = false;
	this->HasGoodSSA = false;
	// Safety flags: choose pessimistic (unsafe) or optimistic (safe) defaults
	//  via the preprocessor switch below; the unsafe branch is currently active.
#if 1  // default to unsafe
	this->SetFuncSafe(false);
	this->SetSpecFuncSafe(false);
	this->SafeCallee = false;
	this->SpecSafeCallee = false;
	this->UnsafeForFastReturns = true;
#else // default to safe
	this->SetFuncSafe(true);
	this->SpecSafeFunc = true;
	this->SafeCallee = true;
	this->SpecSafeCallee = true;
	this->UnsafeForFastReturns = false;
#endif
	this->WritesAboveRA = false;
	this->NeedsStackReferent = true;
	this->SpecNeedsStackReferent = true;
	this->HasIndirectWrites = false;
	this->HasUnsafeIndirectWrites = false;
	this->AltersMemory = false;
	this->HasLoopInArgMemWrites = false;
	this->HasMemExprsFromCalleeLoops = false;
	this->PossibleIndirectCallTarget = false;
	this->PossibleTailCallTarget = false;
	this->ReturnTargetsComputed = false;
	this->OutgoingArgsComputed = false;
	this->GoodLocalVarTable = false;
	this->StackFrameExtendsPastStackTop = false;
	this->SetIsSpeculative(false);
	this->HasHashingCode = false;
	this->HasInArgCodePointer = false;
	this->HasInArgDataPointer = false;
	this->HasMallocCall = false;
	this->TranslatingSPARKLoop = false;
	// Statistics counters.
	this->TypedDefs = 0;
	this->UntypedDefs = 0;
	this->TypedPhiDefs = 0;
	this->UntypedPhiDefs = 0;
	this->SafeBlocks = 0;
	this->UnsafeBlocks = 0;
	this->Size = 0;

	// Stack frame region sizes: either taken from the disassembler (disabled
	//  branch below) or zeroed here and computed later in MDFixFrameInfo().
#if 0
	// The sizes of the three regions of the stack frame other than the
	//  return address are stored in the function structure.
	this->LocalVarsSize = this->FuncInfo->GetFrameSize();
	this->CalleeSavedRegsSize = this->FuncInfo->GetSavedRegSize();
	this->IncomingArgsSize = (STARS_asize_t) this->FuncInfo->GetIncomingArgumentSize();

	// The return address size can be obtained in a machine independent
	//  way by calling get_frame_retsize(). 
	this->RetAddrSize = /* get_frame_retsize(this->GetFuncInfo()); */
			this->GetFuncInfo()->GetFrameReturnAddressSize();
#else  // compute values in MDFixFrameInfo() before their first use; avoid IDA Pro calls
	this->LocalVarsSize = 0;
	this->CalleeSavedRegsSize = 0;
	this->IncomingArgsSize = 0; // unused
	this->RetAddrSize = global_STARS_program->GetSTARS_ISA_Bytewidth();
#endif
	// Stack pointer delta bookkeeping; computed during stack pointer analysis.
	this->AllocSizeAfterFrameAlloc = 0;
	this->OutgoingArgsSize = 0;
	this->LocalVarsAllocInstr = STARS_BADADDR;
	this->LocalVarsDeallocInstr = STARS_BADADDR;
	this->AllocPointDelta = 0;
	this->MinStackDelta = 0;
	this->MaxStackDelta = 0;
	this->MinStackAccessOffset = 0;
	this->MaxStackAccessLimit = 0;
	this->NetStackDelta = CALLING_CONVENTION_DEFAULT_FUNCTION_STACK_DELTA;
	this->PreAllocStackDelta = CALLING_CONVENTION_DEFAULT_PREFRAMEALLOC_STACK_DELTA;
	this->FramePointerStackDelta = 0;
	this->GlobalStackAdjustment = 0;
	this->SetLocalVarOffsetLimit(0);
	this->MaxDirectStackAccessDelta = 0;
	this->IDAReturnAddressOffset = 0;

	this->ReturnAddrStatus = FUNC_UNKNOWN;
	this->FastReturnStatus = SAFE_FAST_RETURN;

	// Incoming argument bookkeeping.
	this->InArgCount = 0;
	this->MaxInArgIndex = 0;
	this->TaintInArgPosBits = 0;
	this->MaxDirectStackAccessSSANum = 0;
	this->MaxRegSSANum = 0;
	this->InArgTypes.clear();
	this->InArgTypes.resize(STARS_MAX_ARGS_PER_FUNC);

	// Empty all analysis containers.
	this->Instrs.clear();
	this->Blocks.clear();
	this->DirectCallTargets.clear();
	this->IndirectCallTargets.clear();
	this->AllCallTargets.clear();
	this->AllCallSources.clear();
	this->InstBlockMap.clear();
	this->RPOBlocks.clear();
	this->IDom.clear();
	this->DomTree.clear();
	this->GlobalNames.clear();
	this->BlocksDefinedIn.clear();
	this->SSACounter.clear();
	this->SSAStack.clear();
	this->LocalVarTable.clear();
	this->SavedRegLoc.clear();
	this->ReturnRegTypes.clear();
	this->LiveOutSet.clear();
	this->KillSet.clear();
	this->GlobalDefAddrBySSA.clear();

	// Size the per-register vectors, one entry per register from ax through
	//  the last saved register number, all initialized to "not saved"/UNINIT.
	struct FineGrainedInfo TempFG;
	TempFG.SignMiscInfo = 0;
	TempFG.SizeInfo = 0;
	for (int RegIndex = STARS_x86_R_ax; RegIndex <= global_STARS_program->GetSTARS_MD_LAST_SAVED_REG_NUM(); ++RegIndex) {
		this->SavedRegLoc.push_back(0); // zero offset means reg not saved
		this->ReturnRegTypes.push_back(UNINIT);
		this->ReturnRegFGInfo.push_back(TempFG);
		this->IncomingRegTypes.push_back(UNINIT);
	}
	// Fill the (already resized) InArgTypes vector with UNINIT entries.
	this->InArgTypes.assign(STARS_MAX_ARGS_PER_FUNC, (unsigned short) UNINIT);

	return;
} // end of SMPFunction() constructor

// Destructor: release all instructions and basic blocks owned by this function.
// (deleting a null pointer is a safe no-op, so no null checks are needed.)
SMPFunction::~SMPFunction() {
	for (SMPInstr *CurrInst : this->Instrs) {
		delete CurrInst;
	}
	for (SMPBasicBlock *CurrBlock : this->Blocks) {
		delete CurrBlock;
	}
}

// Get a non-stale pointer to the STARS_Function_t info for the current function.
// Looks the function up by its start address each time rather than caching a
//  pointer that could become stale.
STARS_Function_t *SMPFunction::GetFuncInfo(void) const {
	STARS_Function_t *FreshInfo = SMP_get_func(this->GetFirstFuncAddr());
	assert(nullptr != FreshInfo);
	return FreshInfo;
}

// Return the jump-to-follow-node counter recorded for InstAddr, or zero if
//  no counter was recorded for that address.
uint16_t SMPFunction::GetJumpToFollowNodeCounter(STARS_ea_t InstAddr) const {
	map<STARS_ea_t, uint16_t>::const_iterator FindIter = this->JumpToFollowNodeCounterMap.find(InstAddr);
	return (FindIter == this->JumpToFollowNodeCounterMap.end()) ? 0 : FindIter->second;
} // end of SMPFunction::GetJumpToFollowNodeCounter()

// Reset the Processed flags in all blocks to false.
void SMPFunction::ResetProcessedBlocks(void) {
	list<SMPBasicBlock *>::iterator CurrBlock;
	for (CurrBlock = this->Blocks.begin(); CurrBlock != this->Blocks.end(); ++CurrBlock) {
		(*CurrBlock)->SetProcessed(false);
	}
	return;
} // end of SMPFunction::ResetProcessedBlocks()

// Set SCCPVisited flag to false in all blocks
void SMPFunction::ResetSCCPVisitedBlocks(void) {
	list<SMPBasicBlock *>::iterator CurrBlock;
	for (CurrBlock = this->Blocks.begin(); CurrBlock != this->Blocks.end(); ++CurrBlock) {
		(*CurrBlock)->SetSCCPVisited(false);
	}
	return;
} // end of SMPFunction::ResetSCCPVisitedBlocks()

// Return an iterator for the beginning of the LiveIn set.
// The function-level LiveIn set is that of the entry (first) block.
STARSOpndSetIter SMPFunction::GetFirstLiveIn(void) {
	SMPBasicBlock *EntryBlock = this->Blocks.front();
	return EntryBlock->GetFirstLiveIn();
} // end of SMPFunction::GetFirstLiveIn()

// Get the termination iterator marker for the LiveIn set (from the entry block).
STARSOpndSetIter SMPFunction::GetLastLiveIn(void) {
	SMPBasicBlock *EntryBlock = this->Blocks.front();
	return EntryBlock->GetLastLiveIn();
}

// Iterator marking the start of the function-level LiveOut set.
STARSOpndSetIter SMPFunction::GetFirstLiveOut(void) {
	return LiveOutSet.begin();
}

// Iterator marking the end of the function-level LiveOut set.
STARSOpndSetIter SMPFunction::GetLastLiveOut(void) {
	return LiveOutSet.end();
}

// Iterator marking the start of the function-level VarKill set.
STARSOpndSetIter SMPFunction::GetFirstVarKill(void) {
	return KillSet.begin();
}

// Iterator marking the end of the function-level VarKill set.
STARSOpndSetIter SMPFunction::GetLastVarKill(void) {
	return KillSet.end();
}

// Compute InRegs, OutRegs, LiveOut, Kill sets for function
void SMPFunction::ComputeGlobalSets(void) {
	list<SMPBasicBlock *>::iterator BlockIter;
	for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
		SMPBasicBlock *CurrBlock = (*BlockIter);
		STARSOpndSetIter LVASetIter;
		pair<STARSOpndSetIter, bool> InsertResult;
		for (LVASetIter = CurrBlock->GetFirstVarKill(); LVASetIter != CurrBlock->GetLastVarKill(); ++LVASetIter) {
			STARSOpndTypePtr KilledOp = (*LVASetIter);
			InsertResult = this->KillSet.insert(KilledOp); // insert will often fail due to duplication, which is expected
		}
		if (CurrBlock->HasReturn()) {
			// LiveOut for return blocks are unioned to make the LiveOut for the function.
			for (LVASetIter = CurrBlock->GetFirstLiveOut(); LVASetIter != CurrBlock->GetLastLiveOut(); ++LVASetIter) {
				STARSOpndTypePtr LiveOutOp = (*LVASetIter);
				InsertResult = this->LiveOutSet.insert(LiveOutOp); // insert will often fail due to duplication, which is expected
			}
		}
	}
	return;
} // end of SMPFunction::ComputeGlobalSets()

// compute InputRegs (USEs) and OutputRegs (DEFs), only inherit from callees on InheritPass
//
// InheritPass == false: scan this function's own instructions and record the
//  registers read (InputRegs) and written (OutputRegs), plus memory input/output.
// InheritPass == true: union in each callee's register effects (tracking which
//  output regs the callee preserves), and inherit callee memory-write exprs and
//  loop memory-write range exprs at each call site.
// WritesMem (out): set to true if a memory write was observed or inherited.
// CallChainNonReturning (out): set to true if some callee calls (directly or
//  through its own callee chain) a non-returning function.
// Returns true if anything changed (register bitsets grew or new exprs inherited).
bool SMPFunction::ComputeInOutRegs(bool InheritPass, bool &WritesMem, bool &CallChainNonReturning) {
	bool Changed = false;
	bool MemoryInput = false;
	bool MemoryOutput = false;
	CallChainNonReturning = false;

	if (InheritPass) {
		SMP_msg("INFO: InheritPass for ComputeInOutRegs(), function %s at %llx\n", this->GetFuncName(), (uint64_t) this->GetFirstFuncAddr());
		// Look at all callees and union their bitsets in.
		// The bitsets only grow (set/|= below), so comparing old vs. new
		//  popcounts at the end of this pass detects any change.
		size_t OldInputBitCount = this->InputRegs.count();
		size_t OldOutputBitCount = this->OutputRegs.count();
		size_t OldCalleePreservedRegsBitCount = this->CalleePreservedRegs.count();
		size_t NumCallees = this->GetNumCallTargets();
		for (size_t CalleeIndex = 0; CalleeIndex < NumCallees; ++CalleeIndex) {
			STARS_ea_t CalleeAddr = this->GetCallTargetAddr(CalleeIndex);
			if (STARS_BADADDR != CalleeAddr) {
				SMPFunction *CalleeFunc = this->GetProg()->FindFunction(CalleeAddr);
				if (nullptr != CalleeFunc) {
					this->InputRegs |= CalleeFunc->GetInputRegs();
					MemoryOutput |= CalleeFunc->AltersSPARKMemory();
					this->CalleeHasLoopInArgMemWrites |= CalleeFunc->UsesInArgsForLoopMemWrites();
					CallChainNonReturning |= (CalleeFunc->HasCallToNonReturningFunc() || CalleeFunc->HasCalleeChainWithNonReturningFunc());

					// Examine whether the callee preserves a reg before deciding how to record the OutputRegs info.
					std::bitset<1 + MD_LAST_REG_NO> TempCalleeOutputRegs = CalleeFunc->GetOutputRegs();
					std::bitset<1 + MD_LAST_REG_NO> TempCalleePreservedRegs = CalleeFunc->GetPreservedRegs();
					std::bitset<1 + MD_LAST_REG_NO> TempCalleeChainPreservedRegs = CalleeFunc->GetCalleePreservedRegs();
					for (size_t RegNo = 0; RegNo < TempCalleeOutputRegs.size(); ++RegNo) {
						if (TempCalleeOutputRegs[RegNo]) {
							// If RegNo is Preserved, record in CalleePreservedRegs.
							if (TempCalleePreservedRegs[RegNo]) {
								this->CalleePreservedRegs.set(RegNo);
							}
							else {
								// Altered, not preserved
								this->OutputRegs.set(RegNo);
							}
						}
						else if (TempCalleePreservedRegs[RegNo]) { // preserved in callee, not explicitly altered
							this->CalleePreservedRegs.set(RegNo);
						}
						else if (TempCalleeChainPreservedRegs[RegNo]) { // preserved in callee chain, not explicitly altered
							this->CalleePreservedRegs.set(RegNo);
						}
					}
				}
			}
		} // end for all callees
		Changed = ((OldInputBitCount < this->InputRegs.count()) || (OldOutputBitCount < this->OutputRegs.count()) || (OldCalleePreservedRegsBitCount < this->CalleePreservedRegs.count()));
	}

	// We need to record registers read and written in instructions, excluding calls with their
	//  conservative DEF and USE lists, and excluding the SSA Marker inst with its pseudo-DEFs.
	for (size_t BlockNum = 0; BlockNum < this->RPOBlocks.size(); ++BlockNum) {
		SMPBasicBlock *CurrBlock = this->RPOBlocks[BlockNum];
		if (!InheritPass) {
			// Direct scan pass: always reports a change.
			Changed = true;
			for (vector<SMPInstr *>::iterator InstIter = CurrBlock->GetFirstInst(); InstIter != CurrBlock->GetLastInst(); ++InstIter) {
				SMPInstr *CurrInst = (*InstIter);
				STARS_ea_t InstAddr = CurrInst->GetAddr();
				if (STARS_IsSSAMarkerPseudoID(InstAddr))
					continue;

				SMPitype FlowType = CurrInst->GetDataFlowType();
				if ((DEFAULT != FlowType) && (JUMP != FlowType) && (INDIR_JUMP != FlowType)) {
					if ((CALL == FlowType) || (INDIR_CALL == FlowType) || (RETURN == FlowType)) {
						// Calls and returns change the stack pointer.
						this->OutputRegs.set((size_t) MD_STACK_POINTER_REG, true);
						this->InputRegs.set((size_t) MD_STACK_POINTER_REG, true);
					}
					continue;  // exclude CALL, INDIR_CALL, RETURN with their conservative USE and DEF lists
				}
				// Record register USEs as inputs; any memory USE marks memory input.
				for (STARSDefUseIter UseIter = CurrInst->GetFirstUse(); UseIter != CurrInst->GetLastUse(); ++UseIter) {
					STARSOpndTypePtr UseOp = UseIter->GetOp();
					if (UseOp->IsRegOp() || UseOp->IsFloatingPointRegOp()) {
						STARS_regnum_t RegNo = UseOp->GetReg();
						this->InputRegs.set((size_t) RegNo, true);
					}
					else if (UseOp->IsMemOp()) {
						MemoryInput = true;
					}
				} // end for all USEs

				// Record register DEFs as outputs; any memory DEF marks memory output.
				for (STARSDefUseIter DefIter = CurrInst->GetFirstDef(); DefIter != CurrInst->GetLastDef(); ++DefIter) {
					STARSOpndTypePtr DefOp = DefIter->GetOp();
					if (DefOp->IsRegOp() || DefOp->IsFloatingPointRegOp()) {
						STARS_regnum_t RegNo = DefOp->GetReg();
						this->OutputRegs.set((size_t) RegNo, true);
					}
					else if (DefOp->IsMemOp()) {
						MemoryOutput = true;
					}
				} // end for all DEFs
			} // end for all insts in block
			if (!MemoryOutput && CurrBlock->HasMemoryWrite()) {
				SMP_msg("INFO: SPARK: Found new MemoryOutput using HasMemoryWrite\n");
				MemoryOutput = true;  // could be non-stack mem, not in DEFs
			}
		}
		else if (CurrBlock->HasCallInstruction()) { // InheritPass
			// Inherit memory-write expressions from each resolvable callee at
			//  each call site (including tail calls) in this block.
			int LoopNum = this->GetInnermostLoopNum(CurrBlock->GetNumber());
			size_t LoopNumOffsetByOne = (size_t)(LoopNum + 1);
			// Note: LoopNum could be -1 if we are outside of any loop.
			list<size_t> LoopList;
			if (LoopNum >= 0) {
				// NOTE(review): this uses the RPO index BlockNum, while LoopNum
				//  above was computed from CurrBlock->GetNumber() -- confirm the
				//  two numbering schemes agree here.
				this->BuildLoopList((int) BlockNum, LoopList);
			}
			string CurrFuncName(this->GetFuncName());

			for (vector<SMPInstr *>::iterator InstIter = CurrBlock->GetFirstInst(); InstIter != CurrBlock->GetLastInst(); ++InstIter) {
				// Find call instruction and determine callee.
				SMPInstr *CurrInst = (*InstIter);
				STARS_ea_t InstAddr = CurrInst->GetAddr();
				SMPitype FlowType = CurrInst->GetDataFlowType();
				if ((CALL == FlowType) || (INDIR_CALL == FlowType) || ((RETURN == FlowType) && CurrInst->IsTailCall())) {
					STARS_ea_t CalleeAddr = CurrInst->GetCallTarget();
					if (STARS_BADADDR != CalleeAddr) {
						SMPFunction *CalleeFunc = this->GetProg()->FindFunction(CalleeAddr);
						// Only inherit from callees whose own analyses succeeded.
						if ((nullptr != CalleeFunc) && CalleeFunc->StackPtrAnalysisSucceeded() && CalleeFunc->HasStructuredControlFlow()) {
							SMP_msg("INFO: SPARK: Function %s inheriting mem exprs from callee %s\n", CurrFuncName.c_str(), CalleeFunc->GetFuncName());
							size_t NumCalleeLoops = CalleeFunc->GetNumLoops();
							// Loop-index-plus-one convention: index 0 holds the
							//  non-loop entries; 1..NumCalleeLoops are per-loop.
							for (size_t CalleeLoopIndexPlusOne = 0; CalleeLoopIndexPlusOne <= NumCalleeLoops; ++CalleeLoopIndexPlusOne) {
								// Get non-loop mem write exprs that trace back to InArgs, trace them back from actual arg in this function.
								size_t NumCalleeInArgMemAddrs = CalleeFunc->GetNumInArgsUsedInMemWrites(CalleeLoopIndexPlusOne);
								pair<STARSExprSetIter, bool> InsertResult;
								for (size_t InArgIndex = 0; InArgIndex < NumCalleeInArgMemAddrs; ++InArgIndex) {
									size_t MemExprWidth = CalleeFunc->GetInArgMemWriteWidth(CalleeLoopIndexPlusOne, InArgIndex);
									STARSExprSetIter CurrentMemExprIter = CalleeFunc->GetInArgExprUsedInMemWrite(CalleeLoopIndexPlusOne, InArgIndex);
									STARSExpression *CurrentMemAddrExpr = (*CurrentMemExprIter)->Clone();
									Changed |= this->InheritCalleeMemExpr(MemExprWidth, CurrentMemAddrExpr, CurrInst, LoopNum, LoopList);
								} // end for ( ... InArgIndex < NumCalleeInArgMemAddrs ...)
							}

							// Inherit all of the callee's inherited MemExprs that traced back to InArgs.
							for (size_t CalleeLoopIndexPlusOne = 0; CalleeLoopIndexPlusOne <= NumCalleeLoops; ++CalleeLoopIndexPlusOne) {
								size_t NumCalleeInheritedExprs = CalleeFunc->GetNumInheritedMemWriteExprs(CalleeLoopIndexPlusOne);
								for (size_t ExprIndex = 0; ExprIndex < NumCalleeInheritedExprs; ++ExprIndex) {
									size_t MemExprWidth = CalleeFunc->GetInheritedMemWriteWidth(CalleeLoopIndexPlusOne, ExprIndex);
									STARSExprSetIter ExprIter = CalleeFunc->GetInheritedMemWriteExpr(CalleeLoopIndexPlusOne, ExprIndex);
									STARSExpression *CurrentMemAddrExpr = (*ExprIter)->Clone();
									Changed |= this->InheritCalleeMemExpr(MemExprWidth, CurrentMemAddrExpr, CurrInst, LoopNum, LoopList);
								}
							}

							// Do the same for callee loop mem write range exprs.
							for (size_t LoopIndex = 0; LoopIndex < NumCalleeLoops; ++LoopIndex) {

								STARSExprBoundsIter ExprIter;
								for (ExprIter = CalleeFunc->GetFirstLoopMemWriteExpandedExprBoundsIter(LoopIndex); ExprIter != CalleeFunc->GetLastLoopMemWriteExpandedExprBoundsIter(LoopIndex); ++ExprIter) {
									STARSExpression *LowerExpr = ExprIter->first;
									STARSExpression *UpperExpr = ExprIter->second;
									
									if ((nullptr != LowerExpr) && (nullptr != UpperExpr)) {
										// Set up a range expr so we can re-use this->InheritCalleeMemRangeExpr()
										STARSExpression *TempMemRangeExpr = new STARSExpression();
										TempMemRangeExpr->SetLeftTree(LowerExpr->Clone());
										TempMemRangeExpr->SetRightTree(UpperExpr->Clone());
										TempMemRangeExpr->SetParentFunc(TempMemRangeExpr->GetLeftTree()->GetParentFunc());
										TempMemRangeExpr->SetParentInst(TempMemRangeExpr->GetLeftTree()->GetParentInst());
										TempMemRangeExpr->SetOriginalParentInst(TempMemRangeExpr->GetLeftTree()->GetOriginalParentInst());
										TempMemRangeExpr->SetOperator(SMP_LESS_THAN); // LowerBound < UpperBound is range expr
										// Take the write width from whichever bound can provide it.
										size_t MemWriteByteWidth = LowerExpr->FindOrigMemOpByteWidth();
										if (0 == MemWriteByteWidth) {
											MemWriteByteWidth = UpperExpr->FindOrigMemOpByteWidth();
										}
										if (0 == MemWriteByteWidth) {
											SMP_msg("ERROR: expr Inherited byte width of 0 from CalleeFunc %s ; expr dump follows.\n",
												CalleeFunc->GetFuncName());
											LowerExpr->Dump(0);
											MemWriteByteWidth = global_STARS_program->GetSTARS_ISA_Bytewidth(); // default
										}

										Changed |= this->InheritCalleeMemRangeExpr(TempMemRangeExpr, CurrInst, MemWriteByteWidth, LoopNumOffsetByOne, LoopList);
									}
								} // end for all expanded expr pairs in callee func for current LoopIndex
							} // end for all loops in CalleeFunc

							// Do the same for callee looping string mem write range exprs.
							for (size_t LoopIndexPlusOne = 0; LoopIndexPlusOne <= CalleeFunc->GetNumLoops(); ++LoopIndexPlusOne) {
								size_t ExprIndexLimit = CalleeFunc->GetNumStringMemWriteRangeExprs(LoopIndexPlusOne);
								for (size_t i = 0; i < ExprIndexLimit; ++i) {
									STARSExprSetIter StringExprIter = CalleeFunc->GetStringMemWriteRangeExpr(LoopIndexPlusOne, i);
									STARSExpression *TempStringRangeExpr = (*StringExprIter);
									assert(nullptr != TempStringRangeExpr);
									STARSExpression *CalleeStringRangeExpr = TempStringRangeExpr->Clone();
									size_t MemWriteByteWidth = CalleeFunc->GetStringMemWriteRangeWidth(LoopIndexPlusOne, i);
									SMP_msg("INFO: SPARK: Inheriting CalleeStringRangeExpr from CalleeAddr %llx at CallAddr %llx\n",
										(uint64_t) CalleeAddr, (uint64_t) InstAddr);
									Changed |= this->InheritCalleeMemRangeExpr(CalleeStringRangeExpr, CurrInst, MemWriteByteWidth, LoopNumOffsetByOne, LoopList);
								}
							} // end for all loops in CalleeFunc
						}
					}
				}
			}
		}
	} // end for all blocks

	// NOTE: MemoryInput is collected above but not currently reported to the caller.
	WritesMem = MemoryOutput;

	return Changed;
} // end of SMPFunction::ComputeInOutRegs()

// Return true if mem expr inherited and added to MemAddrExprWidthsFromCallees
//  (or recorded in the StoppedOnIV collections after a partial expansion).
// MemExprWidth: byte width of the callee's memory write.
// CurrentMemAddrExpr: clone of the callee's memory address expr; rewritten in
//  place to be expressed in terms of this (caller) function.
// CallInst: the call site instruction in this function.
// LoopNum: innermost loop number containing the call block (-1 if none).
// LoopList: the loops of this function that contain the call block.
bool SMPFunction::InheritCalleeMemExpr(size_t MemExprWidth, STARSExpression *CurrentMemAddrExpr, SMPInstr *CallInst, int LoopNum, const list<size_t> &LoopList) {
	// Trace actual arg register back, substitute source into clone of callee mem addr expr to produce
	//  the mem addr expr for this function.
	STARSOpndTypePtr InArgOp = CurrentMemAddrExpr->GetLeftOperand();
	STARS_ea_t InstAddr = CallInst->GetAddr();
	size_t LoopNumOffsetByOne = (size_t) (LoopNum + 1);
	bool changed = false; // return value: did we record a new inherited expr?
	bool VerboseOutput = global_stars_interface->VerboseLoopsMode();
	if (!CurrentMemAddrExpr->HasLeftSubTree() && InArgOp->IsRegOp() && (!InArgOp->MatchesReg(MD_STACK_POINTER_REG))) { // simple LeftOperand == InArg case
		STARSDefUseIter UseIter = CallInst->FindUse(InArgOp);
		assert(UseIter != CallInst->GetLastUse());
		// Start changing values in our cloned expr to match this function.
		CurrentMemAddrExpr->SetParentFunc(this);
		CurrentMemAddrExpr->SetParentInst(CallInst);
		CurrentMemAddrExpr->SetLeftUseAddr(InstAddr);
		CurrentMemAddrExpr->SetLeftSSANum(UseIter->GetSSANum());
		CurrentMemAddrExpr->SetLeftPreLoopDefAddr(STARS_BADADDR);
		if (VerboseOutput) {
			SMP_msg("INFO: MemAddrExprFromCallee before expansion: Width: %zu", MemExprWidth);
			CurrentMemAddrExpr->Dump(0);
		}
		bool StoppedOnIV = false;
		// BUG FIX: this in/out flag for ExpandExpr() was previously declared as
		//  a second 'bool changed', shadowing the outer return flag, so the
		//  function always returned false. Use a distinct name.
		bool ExprChangedByExpansion = false;
		set<int> DummyLoopRegHashes;
		set<STARS_ea_t> StackPtrCopySet;
		int DepthCounter = 0;
		if (CurrentMemAddrExpr->ExpandExpr(InstAddr, (size_t) LoopNum, false, true, true, false, false, DummyLoopRegHashes, StoppedOnIV, ExprChangedByExpansion, StackPtrCopySet, DepthCounter)) {
			if (!StoppedOnIV) {
				// CurrentMemAddrExpr has been expanded successfully to an InArg or constant.
				CurrentMemAddrExpr->EvaluateConsts();
				CurrentMemAddrExpr->SimplifyDriver(); // result flag not needed here
				pair<STARSExprSetIter, bool> InsertResult = this->MemAddrExprsFromCallees[LoopNumOffsetByOne].insert(CurrentMemAddrExpr);
				if (InsertResult.second) { // new mem expr
					pair<STARSExprSetIter, size_t> InsertValue(InsertResult.first, MemExprWidth);
					this->MemAddrExprWidthsFromCallees[LoopNumOffsetByOne].push_back(InsertValue);
					if (VerboseOutput) {
						SMP_msg("INFO: MemAddExprFromCallee after expansion: Width: %zu", MemExprWidth);
						CurrentMemAddrExpr->Dump(0);
					}
					std::bitset<1 + MD_LAST_REG_NO> InArgRegNums;
					CurrentMemAddrExpr->ListInArgRegsUsed(InArgRegNums);
					// Mark all loops containing this block as having callee mem writes.
					for (list<size_t>::const_iterator LoopIter = LoopList.cbegin(); LoopIter != LoopList.cend(); ++LoopIter) {
						this->LoopHasCalleeMemWrites[*LoopIter] = true;
					}
					changed = true; // now correctly updates the value we return
					this->MemRangeRegsBitmap |= InArgRegNums;

					if (LoopNum >= 0) {
						// If CurrentMemAddrExpr expands to a stack frame write in the loop's containing procedure,
						//  update the stack frame written maps.
						STARS_sval_t FinalStackPtrOffset;
						// CurrentMemAddrExpr has already expanded to the incoming stack pointer reg, so the
						//  expression is already normalized to a negative offset from the return address location.
						//  We pass in zero as the CurrentStackPtrOffset to avoid double-normalization.
						if (CurrentMemAddrExpr->IsStackPtrOffset(0, FinalStackPtrOffset)) {
							// Update for all loops that contain CallInst.
							int CallBlockNum = CallInst->GetBlock()->GetNumber();
							// Distinct name: a previous version shadowed the LoopList parameter here.
							list<size_t> CallBlockLoopList;
							this->BuildLoopList(CallBlockNum, CallBlockLoopList);
							for (list<size_t>::const_iterator LoopIter = CallBlockLoopList.cbegin(); LoopIter != CallBlockLoopList.cend(); ++LoopIter) {
								size_t CurrLoopNum = (*LoopIter);
								this->UpdateStackBytesWrittenByLoop(FinalStackPtrOffset, MemExprWidth, CurrLoopNum);
								for (size_t RegNum = 0; RegNum < InArgRegNums.size(); ++RegNum) {
									if (InArgRegNums[RegNum]) {
										this->LoopMemRangeInArgRegsBitmap[CurrLoopNum].set(RegNum);
									}
								}
							}
						}
					}
				}
				else {
					SMP_msg("INFO: Expr insert collision at CallAddr %llx\n", (uint64_t)InstAddr);
				}
			}
			else { // StoppedOnIV
				SMP_msg("INFO: SPARK: Expr Expand() StoppedOnIV in InheritCalleeMemExpr() at %llx\n",
					(uint64_t) InstAddr);
				pair<STARSExprSetIter, bool> InsertResult = this->StoppedOnIVNonRangeExprs[LoopNumOffsetByOne].insert(CurrentMemAddrExpr);
				if (InsertResult.second) { // new Expr
					pair<STARSExprSetIter, size_t> InsertValue(InsertResult.first, MemExprWidth);
					this->StoppedOnIVNonRangeIterWidths[LoopNumOffsetByOne].push_back(InsertValue);
				}
			}
		}
		else {
			SMP_msg("ERROR: ExpandExpr() failure in InheritCalleeMemExpr() at CallAddr %llx\n", (uint64_t) InstAddr);
		}
	}
	return changed;
} // end of SMPFunction::InheritCalleeMemExpr()

// Inherit a memory range expr from a callee's loop into this caller at call site CallInst.
//  The cloned CalleeMemRangeExpr is re-parented to this function, its incoming-arg registers
//  are rewritten to the caller's SSA names at the call site, and the expr is expanded to this
//  function's InArgs/constants. On success the expr and MemWriteByteWidth are recorded in the
//  per-loop containers indexed by LoopNumPlusOne (0 => function level, N => loop N-1).
//  LoopList holds the numbers of all loops in this function that contain the call site.
// Returns true if a new expr was recorded.
bool SMPFunction::InheritCalleeMemRangeExpr(STARSExpression *CalleeMemRangeExpr, SMPInstr *CallInst, size_t MemWriteByteWidth, size_t LoopNumPlusOne, const list<size_t> &LoopList) {
	// Again, deal with the simple to analyze case in which we can find the InArg on the lhs easily.
	bool FoundInArg = false; // NOTE(review): never set or read in this method as written
	bool Changed = false;
	bool VerboseOutput = global_stars_interface->VerboseLoopsMode();
	STARS_ea_t InstAddr = CallInst->GetAddr();

	// Start changing values in our cloned expr to match this function.
	CalleeMemRangeExpr->SetParentFunc(this);
	CalleeMemRangeExpr->SetParentInst(CallInst);
	bitset<1 + MD_LAST_REG_NO> InArgRegNums;
	CalleeMemRangeExpr->ListInArgRegsUsed(InArgRegNums);
	bool FoundInArgReg = InArgRegNums.any();

	if (FoundInArgReg) {
		// The callee expr depends on incoming-arg registers. Rewrite each such register's
		//  callee-side SSA number (0) to the SSA number of the USE at the call site here.
		bool ReplacedInArgSSANum = false;
		for (size_t RegNum = 0; RegNum < InArgRegNums.size(); ++RegNum) {
			bool StackPointerReg = (RegNum == MD_STACK_POINTER_REG);
			if (!StackPointerReg && InArgRegNums[RegNum]) {
				STARSOpndTypePtr CurrInArgOp = CallInst->MakeRegOpnd((STARS_regnum_t) RegNum);
				STARSDefUseIter UseIter = CallInst->FindUse(CurrInArgOp);
				if (UseIter != CallInst->GetLastUse()) {
					// Replace all SSANum == 0 instances of InArgOp with SSANum of InArg in this function.
					// NOTE: Might we need to do the same for multiple InArgs, used as limits, etc.?
					CalleeMemRangeExpr->SubstituteSSANum(CallInst, UseIter->GetSSANum(), CurrInArgOp);
					ReplacedInArgSSANum = true;
				}
				else {
					SMP_msg("ERROR: SPARK: Callee loop range does not trace to InArgUSE at call site %llx\n", (uint64_t) InstAddr);
				}
			}
		} // end for all InArgRegNums
		if (!ReplacedInArgSSANum) {
			SMP_msg("ERROR: SPARK: Callee loop range expr did not see InArgSSANums replaced at call site %llx\n", (uint64_t) InstAddr);
		}
		if (VerboseOutput) {
			SMP_msg("INFO: LoopCalleeMemRangeExpr before expansion:");
			CalleeMemRangeExpr->Dump(0);
		}
		// Expand the expr in the caller's context, back to InArgs or constants if possible.
		bool StoppedOnIV = false;
		bool changed = false;
		set<int> DummyLoopRegHashes;
		set<STARS_ea_t> StackPtrCopySet;
		int DepthCounter = 0;
		if (CalleeMemRangeExpr->ExpandExpr(InstAddr, 0, false, true, true, false, false, DummyLoopRegHashes, StoppedOnIV, changed, StackPtrCopySet, DepthCounter)) {
			if (!StoppedOnIV) {
				// CalleeMemRangeExpr has been expanded successfully to an InArg or constant.
				CalleeMemRangeExpr->EvaluateConsts();
				bool Simplified = CalleeMemRangeExpr->SimplifyDriver();
				pair<STARSExprSetIter, bool> InsertResult = this->LoopMemAddrExprsFromCalleeLoops[LoopNumPlusOne].insert(CalleeMemRangeExpr);
				if (InsertResult.second) { // new mem expr
					// Recompute the InArg regs used, as expansion can change them.
					InArgRegNums.reset();
					this->HasMemExprsFromCalleeLoops = true;
					Changed = true;
					CalleeMemRangeExpr->ListInArgRegsUsed(InArgRegNums);
					pair<STARSExprSetIter, size_t> InsertValue(InsertResult.first, MemWriteByteWidth);
					this->LoopMemAddrExprWidthsFromCalleeLoops[LoopNumPlusOne].push_back(InsertValue);
					if (VerboseOutput) {
						SMP_msg("INFO: LoopCalleeMemRangeExpr after expansion:");
						CalleeMemRangeExpr->Dump(0);
					}
					this->MemRangeRegsBitmap |= InArgRegNums;
					// Mark all loops containing this block as having callee mem writes.
					for (list<size_t>::const_iterator LoopIter = LoopList.cbegin(); LoopIter != LoopList.cend(); ++LoopIter) {
						size_t CurrLoopNum = *LoopIter;
						this->LoopHasCalleeMemWrites[CurrLoopNum] = true;
						// Mark the InArg regs reached in the Expand() operation.
						for (size_t RegNum = 0; RegNum < InArgRegNums.size(); ++RegNum) {
							if (InArgRegNums[RegNum]) {
								this->LoopMemRangeInArgRegsBitmap[CurrLoopNum].set(RegNum);
							}
						}
					}
				}
				else {
					SMP_msg("INFO: Expr insert collision at CallAddr %llx\n", (uint64_t) InstAddr);
				}
			}
			else { // StoppedOnIV
				// Expansion hit an induction variable; record the expr as problematic.
				SMP_msg("ERROR: SPARK: Expr Expand() StoppedOnIV in InheritCalleeMemRangeExpr() at %llx\n",
					(uint64_t) InstAddr);
				this->CalleeMemExprProblems[LoopNumPlusOne] = true;
				pair<STARSExprSetIter, bool> InsertResult = this->StoppedOnIVMemRangeExprs[LoopNumPlusOne].insert(CalleeMemRangeExpr);
				if (InsertResult.second) { // new Expr
					pair<STARSExprSetIter, size_t> InsertValue(InsertResult.first, MemWriteByteWidth);
					this->StoppedOnIVMemRangeIterWidths[LoopNumPlusOne].push_back(InsertValue);
				}
			}
		}
		else {
			SMP_msg("ERROR: SPARK: ExpandExpr() failure in InheritCalleeMemRangeExpr at CallAddr %llx\n", (uint64_t) InstAddr);
			this->CalleeMemExprProblems[LoopNumPlusOne] = true;
		}
	}
	else if (!FoundInArgReg) { // NOTE(review): equivalent to plain else; condition is always true here
		// No InArg regs in the expr (e.g. global static address): inherit it as-is.
		pair<STARSExprSetIter, bool> InsertResult = this->LoopMemAddrExprsFromCalleeLoops[LoopNumPlusOne].insert(CalleeMemRangeExpr);
		if (InsertResult.second) { // new mem expr
			this->HasMemExprsFromCalleeLoops = true;
			Changed = true;
			pair<STARSExprSetIter, size_t> InsertValue(InsertResult.first, MemWriteByteWidth);
			this->LoopMemAddrExprWidthsFromCalleeLoops[LoopNumPlusOne].push_back(InsertValue);
			if (VerboseOutput) {
				SMP_msg("INFO: Global static inherited LoopCalleeMemRangeExpr:");
				CalleeMemRangeExpr->Dump(0);
			}
			// Mark all loops containing this block as having callee mem writes.
			for (list<size_t>::const_iterator LoopIter = LoopList.cbegin(); LoopIter != LoopList.cend(); ++LoopIter) {
				size_t CurrLoopNum = *LoopIter;
				this->LoopHasCalleeMemWrites[CurrLoopNum] = true;
			}
		}
		else {
			SMP_msg("INFO: Expr insert collision at CallAddr %llx\n", (uint64_t) InstAddr);
		}
	}
	return Changed;
} // end of SMPFunction::InheritCalleeMemRangeExpr()

// Split relational RangeExpr into upper and lower bounds exprs and save them and their width.
//  PositiveIncrement: induction direction; passed through to SplitMemoryRangeExpr().
//  LoopNumPlusOne: container index; 0 => function level, N => loop N-1.
//  MemWidth: byte width of each memory write in the range (0 when the upper bound
//   already accounts for the width).
//  RangeExpr: the relational expr to split; must not be null.
void SMPFunction::SplitAndSaveRelationalExpr(bool PositiveIncrement, std::size_t LoopNumPlusOne, size_t MemWidth, STARSExpression *RangeExpr) {
	assert(nullptr != RangeExpr);
	// LoopNumPlusOne is unsigned, so the old (0 <= LoopNumPlusOne) clause was a
	//  tautology and checked nothing; only the upper bound needs asserting.
	assert(LoopNumPlusOne <= (size_t) this->GetNumLoops());
	bool VerboseOutput = global_stars_interface->VerboseLoopsMode();
	STARSExpression *LowerExpr;
	STARSExpression *UpperExpr;
	RangeExpr->SplitMemoryRangeExpr(PositiveIncrement, LowerExpr, UpperExpr);
	assert(nullptr != LowerExpr);
	assert(nullptr != UpperExpr);
	if (VerboseOutput) {
		SMP_msg("INFO: SPARK: RelationalExpr being split: \n");
		RangeExpr->Dump(0);
	}
	// Record both bounds; a width entry is added only when at least one bound is new.
	pair<STARSExprSetIter, bool> InsertResult = this->RelationalLowerBoundExprs[LoopNumPlusOne].insert(LowerExpr);
	pair<STARSExprSetIter, bool> InsertResult2 = this->RelationalUpperBoundExprs[LoopNumPlusOne].insert(UpperExpr);
	if (InsertResult.second || InsertResult2.second) { // new range if either one was new expr
		pair<STARSExprSetIter, STARSExprSetIter> BoundsIters(InsertResult.first, InsertResult2.first);
		pair<size_t, pair<STARSExprSetIter, STARSExprSetIter> > InsertValue(MemWidth, BoundsIters);
		this->RelationalMemWriteWidths[LoopNumPlusOne].push_back(InsertValue);
	}
	else {
		SMP_msg("INFO: SPARK: Duplicate RelationalExpr not saved.\n");
	}

	return;
} // end of SMPFunction::SplitAndSaveRelationalExpr()

// Build range expr for memory writes in looping string opcode (e.g. rep stos/movs).
//  For a writing string operation, constructs the half-open range
//  RDI <= addr < RDI + RCX*ByteWidth, expands both bounds at the innermost loop
//  containing CurrBlock (or function level if not in a loop), and records the
//  range and its write width in the string-range containers, plus the split
//  lower/upper bound containers via SplitAndSaveRelationalExpr().
void SMPFunction::BuildLoopingStringMemExprs(SMPBasicBlock *CurrBlock, SMPInstr *CurrInst) {
	assert(CurrBlock->HasLoopingStringOpcode());
	unsigned short opcode = CurrInst->GetIDAOpcode();
	STARS_ea_t InstAddr = CurrInst->GetAddr();
	// cmps/scas/outs only read memory, so no write range is needed for them.
	bool NonWritingLoopingStringOperation = ((opcode == STARS_NN_cmps) || (opcode == STARS_NN_scas) || (opcode == STARS_NN_outs));
	bool VerboseOutput = global_stars_interface->VerboseLoopsMode();
	if (!NonWritingLoopingStringOperation) {
		// Make Expr like: RDI < (RDI + (RCX*ByteWidth))
		int CurrBlockNum = CurrBlock->GetNumber();
		STARSOpndTypePtr FirstOpnd = CurrInst->GetOperand(0);
		uint16_t ByteWidth = FirstOpnd->GetByteWidth();
		// Lower bound: the RDI value (destination pointer) at the instruction.
		STARSExpression *LowerBoundExpr = new STARSExpression();
		LowerBoundExpr->SetParentFunc(this);
		LowerBoundExpr->SetParentInst(CurrInst);
		LowerBoundExpr->SetOriginalParentInst(CurrInst);
		LowerBoundExpr->SetOperator(SMP_ASSIGN);
		STARSOpndTypePtr UseOp = CurrInst->MakeRegOpnd(STARS_x86_R_di);
		STARSDefUseIter UseIter = CurrInst->FindUse(UseOp);
		assert(UseIter != CurrInst->GetLastUse());
		LowerBoundExpr->SetLeftOperand(UseOp);
		LowerBoundExpr->SetLeftUseAddr(InstAddr);
		LowerBoundExpr->SetLeftSSANum(UseIter->GetSSANum());

		// Determine loop context: GetInnermostLoopNum() is negative when not in a loop,
		//  making LoopIndexPlusOne zero (the function-level container index).
		int LoopIndex = this->GetInnermostLoopNum(CurrBlockNum);
		size_t LoopNum = (size_t) LoopIndex;
		int LoopIndexPlusOne = LoopIndex + 1;
		bool InsideLoop = (0 <= LoopIndex);
		bool StoppedOnIV = false;
		bool changed = false;
		if (!InsideLoop) { // not in a loop
			LoopNum = 0;
		}
		set<int> LoopRegHashes;
		set<STARS_ea_t> StackPtrCopySet;
		int DepthCounter = 0;
		if (LowerBoundExpr->ExpandExpr(InstAddr, LoopNum, false, true, true, true, false, LoopRegHashes, StoppedOnIV, changed, StackPtrCopySet, DepthCounter)) {
			if (StoppedOnIV) {
				// Lower bound hit an induction variable; record and flag the problem.
				this->SymbolicAnalysisProblems[LoopIndexPlusOne] = true;
				SMP_msg("ERROR: SPARK: LowerBoundExpr Expand() StoppedOnIV in BuildLoopingStringMemExprs() at %llx\n",
					(uint64_t)InstAddr);
				pair<STARSExprSetIter, bool> InsertResult = this->StoppedOnIVMemRangeExprs[LoopIndexPlusOne].insert(LowerBoundExpr);
				if (InsertResult.second) { // new Expr
					pair<STARSExprSetIter, size_t> InsertValue(InsertResult.first, ByteWidth);
					this->StoppedOnIVMemRangeIterWidths[LoopIndexPlusOne].push_back(InsertValue);
				}
			}
			else {
				LowerBoundExpr->EvaluateConsts();
				LowerBoundExpr->SimplifyDriver();
				// The upper bound is RDI + RCX*ByteWidth. Optimize away ByteWidth == 1.
				//  Clone the expanded and simplified RDI from LowerBoundExpr to save time.
				STARSExpression *UpperBoundExpr = new STARSExpression();
				UpperBoundExpr->SetParentFunc(this);
				UpperBoundExpr->SetParentInst(CurrInst);
				UpperBoundExpr->SetOriginalParentInst(CurrInst);
				UpperBoundExpr->SetOperator(SMP_ADD);
				UpperBoundExpr->SetLeftTree(LowerBoundExpr->Clone());
				UpperBoundExpr->SetLeftPreLoopDefAddr(LowerBoundExpr->GetLeftPreLoopDefAddr());
				STARSOpndTypePtr CounterOp = CurrInst->MakeRegOpnd(STARS_x86_R_cx);
				STARSDefUseIter CounterUseIter = CurrInst->FindUse(CounterOp);
				assert(CounterUseIter != CurrInst->GetLastUse());
				if (1 == ByteWidth) { // Simplify RCX * 1 to RCX
					UpperBoundExpr->SetRightOperand(CounterOp);
					UpperBoundExpr->SetRightUseAddr(InstAddr);
					UpperBoundExpr->SetRightSSANum(CounterUseIter->GetSSANum());
				}
				else { // Create RCX*ByteWidth right tree.
					STARSExpression *RightTree = new STARSExpression();
					RightTree->SetParentFunc(this);
					RightTree->SetParentInst(CurrInst);
					RightTree->SetOriginalParentInst(CurrInst);
					RightTree->SetOperator(SMP_U_MULTIPLY);
					RightTree->SetLeftOperand(CounterOp);
					RightTree->SetLeftUseAddr(CurrInst->GetAddr());
					RightTree->SetLeftSSANum(CounterUseIter->GetSSANum());
					RightTree->SetRightOperand(CurrInst->MakeImmediateOpnd((STARS_uval_t)ByteWidth));
					UpperBoundExpr->SetRightTree(RightTree);
				}
				// Expand only the right side, as left side was expanded, simplified and cloned from lower bound.
				DepthCounter = 0;
				if (UpperBoundExpr->ExpandExpr(InstAddr, LoopNum, true, true, true, true, false, LoopRegHashes, StoppedOnIV, changed, StackPtrCopySet, DepthCounter)) {
					if (!StoppedOnIV) {
						if (InsideLoop) {
							// Remember the registers hashed during expansion for this loop.
							this->LoopRegHashSets[LoopNum] = LoopRegHashes;
						}
						if (UpperBoundExpr->HasRightSubTree()) {
							UpperBoundExpr->GetRightTree()->EvaluateConsts();
							UpperBoundExpr->GetRightTree()->SimplifyExpr(UpperBoundExpr);
							UpperBoundExpr->SimplifyDriver();
							// Success. Record exprs in container.
							// Assemble the relational range expr: lower SMP_LESS_THAN upper.
							STARSExpression *RangeExpr = new STARSExpression();
							RangeExpr->SetOperator(SMP_LESS_THAN);
							RangeExpr->SetParentFunc(this);
							RangeExpr->SetParentInst(LowerBoundExpr->GetParentInst());
							RangeExpr->SetOriginalParentInst(LowerBoundExpr->GetOriginalParentInst());
							// If LowerBoundExpr is just SMP_ASSIGN and an operand, then grab the operand as our LeftOperand.
							//  Otherwise, grab its left tree.
							if (LowerBoundExpr->GetOperator() == SMP_ASSIGN) {
								RangeExpr->SetLeftSSANum(LowerBoundExpr->GetLeftSSANum());
								RangeExpr->SetLeftPreLoopDefAddr(LowerBoundExpr->GetLeftPreLoopDefAddr());
								if (LowerBoundExpr->HasLeftSubTree()) {
									RangeExpr->SetLeftTree(LowerBoundExpr->GetLeftTree());
								}
								else {
									RangeExpr->SetLeftOperand(LowerBoundExpr->GetLeftOperand());
									RangeExpr->SetLeftUseAddr(LowerBoundExpr->GetLeftUseAddr());
								}
							}
							else { // complex expression
								RangeExpr->SetLeftTree(LowerBoundExpr);
							}
							// Ditto for UpperBoundExpr, except it cannot be a simple SMP_ASSIGN.
							RangeExpr->SetRightTree(UpperBoundExpr);
							RangeExpr->SetRightPreLoopDefAddr(UpperBoundExpr->GetLeftPreLoopDefAddr());
							pair<STARSExprSetIter, bool> InsertResult = this->StringMemWriteRangeExprs[(size_t)LoopIndexPlusOne].insert(RangeExpr);
							if (InsertResult.second) { // new memory range
								if (VerboseOutput) {
									SMP_msg("INFO: SPARK: LoopingStringRange at %llx\n", InstAddr);
									RangeExpr->Dump(0);
								}
								pair<size_t, STARSExprSetIter> InsertValue(ByteWidth, InsertResult.first);
								this->StringMemWriteRangeWidths[(size_t)LoopIndexPlusOne].push_back(InsertValue);
								// We have already computed the upper bound taking into account the ByteWidth, so pass
								//  a ByteWidth of 0 here to avoid double-adding the ByteWidth in EmitSPARKMemRange().
								this->SplitAndSaveRelationalExpr(true, (size_t)LoopIndexPlusOne, 0, RangeExpr);
							}
						}
						else {
							SMP_msg("ERROR: SPARK: No RightTree for BuildLoopingStringMemExprs() at %llx\n",
								(uint64_t) InstAddr);
							this->SymbolicAnalysisProblems[LoopIndexPlusOne] = true;
						}
					}
					else { // StoppedOnIV
						SMP_msg("ERROR: SPARK: UpperBoundExpr Expand() StoppedOnIV in BuildLoopingStringMemExprs() at %llx\n",
							(uint64_t) InstAddr);
						this->SymbolicAnalysisProblems[LoopIndexPlusOne] = true;
						pair<STARSExprSetIter, bool> InsertResult = this->StoppedOnIVMemRangeExprs[LoopIndexPlusOne].insert(UpperBoundExpr);
						if (InsertResult.second) { // new Expr
							pair<STARSExprSetIter, size_t> InsertValue(InsertResult.first, ByteWidth);
							this->StoppedOnIVMemRangeIterWidths[LoopIndexPlusOne].push_back(InsertValue);
						}
					}
				}
				else { // error on UpperBoundExpr->ExpandExpr()
					this->SymbolicAnalysisProblems[LoopIndexPlusOne] = true;
					SMP_msg("ERROR: SPARK: UpperBoundExpr ExpandExpr() failed in BuildLoopingStringMemExprs() at %llx\n",
						(uint64_t) InstAddr);
				}
			}
		}
		else {
			this->SymbolicAnalysisProblems[LoopIndexPlusOne] = true;
			SMP_msg("ERROR: SPARK: LowerBoundExpr ExpandExpr() failed in BuildLoopingStringMemExprs() at %llx\n",
				(uint64_t) InstAddr);
		}
	}
	return;
} // end of SMPFunction::BuildLoopingStringMemExprs()

// Four methods to get values from the maps of global reg/SSA to FG info.
//  For local names, see corresponding methods in SMPBasicBlock.
unsigned short SMPFunction::GetDefSignMiscInfo(int DefHashValue) {
	map<int, struct FineGrainedInfo>::iterator MapIter;

	MapIter = this->GlobalDefFGInfoBySSA.find(DefHashValue);
	if (MapIter != this->GlobalDefFGInfoBySSA.end())
		return MapIter->second.SignMiscInfo;
	else
		return 0;
} // end of SMPFunction::GetDefSignMiscInfo()

// Fetch the sign/misc FG info for the stack DEF at InstAddr; an entry must exist.
unsigned short SMPFunction::GetStackDefSignMiscInfo(STARS_ea_t InstAddr) {
	map<STARS_ea_t, struct FineGrainedInfo>::iterator FindIter = this->StackDefFGInfo.find(InstAddr);
	assert(FindIter != this->StackDefFGInfo.end());
	return FindIter->second.SignMiscInfo;
}

unsigned short SMPFunction::GetUseSignMiscInfo(int UseHashValue) {
	map<int, struct FineGrainedInfo>::iterator MapIter;

	MapIter = this->GlobalUseFGInfoBySSA.find(UseHashValue);
	if (MapIter != this->GlobalUseFGInfoBySSA.end())
		return MapIter->second.SignMiscInfo;
	else
		return 0;
} // end of SMPFunction::GetUseSignMiscInfo()

// Fetch the sign/misc FG info for the stack USE at InstAddr; an entry must exist.
unsigned short SMPFunction::GetStackUseSignMiscInfo(STARS_ea_t InstAddr) {
	map<STARS_ea_t, struct FineGrainedInfo>::iterator FindIter = this->StackUseFGInfo.find(InstAddr);
	assert(FindIter != this->StackUseFGInfo.end());
	return FindIter->second.SignMiscInfo;
}

unsigned short SMPFunction::GetDefWidthTypeInfo(int DefHashValue) {
	map<int, struct FineGrainedInfo>::iterator MapIter;

	MapIter = this->GlobalDefFGInfoBySSA.find(DefHashValue);
	if (MapIter != this->GlobalDefFGInfoBySSA.end())
		return MapIter->second.SizeInfo;
	else
		return 0;
} // end of SMPFunction::GetDefWidthTypeInfo()

unsigned short SMPFunction::GetUseWidthTypeInfo(int UseHashValue) {
	map<int, struct FineGrainedInfo>::iterator MapIter;

	MapIter = this->GlobalUseFGInfoBySSA.find(UseHashValue);
	if (MapIter != this->GlobalUseFGInfoBySSA.end())
		return MapIter->second.SizeInfo;
	else
		return 0;
} // end of SMPFunction::GetUseWidthTypeInfo()

struct FineGrainedInfo SMPFunction::GetDefFGInfo(int DefHashValue) {
	map<int, struct FineGrainedInfo>::iterator MapIter;

	MapIter = this->GlobalDefFGInfoBySSA.find(DefHashValue);
	if (MapIter != this->GlobalDefFGInfoBySSA.end())
		return MapIter->second;
	else {
		struct FineGrainedInfo EmptyFG;
		EmptyFG.SignMiscInfo = 0;
		EmptyFG.SizeInfo = 0;
		return EmptyFG;
	}
} // end of SMPFunction::GetDefFGInfo()

struct FineGrainedInfo SMPFunction::GetUseFGInfo(int UseHashValue) {
	map<int, struct FineGrainedInfo>::iterator MapIter;

	MapIter = this->GlobalUseFGInfoBySSA.find(UseHashValue);
	if (MapIter != this->GlobalUseFGInfoBySSA.end())
		return MapIter->second;
	else {
		struct FineGrainedInfo EmptyFG;
		EmptyFG.SignMiscInfo = 0;
		EmptyFG.SizeInfo = 0;
		return EmptyFG;
	}
} // end of SMPFunction::GetUseFGInfo()

// Fetch SPARK Ada control flow type for jump at InstAddr; return FALL_THROUGH if none found.
ControlFlowType SMPFunction::GetControlFlowType(STARS_ea_t InstAddr) const {
	map<STARS_ea_t, unsigned short>::const_iterator FindIter = this->ControlFlowMap.find(InstAddr);
	return (FindIter == this->ControlFlowMap.end()) ? FALL_THROUGH : ((ControlFlowType) FindIter->second);
} // end of SMPFunction::GetControlFlowType()

// Set counter to zero, or insert zero counter if none found
void SMPFunction::ResetJumpToFollowNodeCounter(STARS_ea_t InstAddr) {
	// operator[] value-initializes a missing counter to zero, so a single
	//  assignment covers both the insert and the reset cases.
	this->JumpToFollowNodeCounterMap[InstAddr] = 0;
	return;
} // end of SMPFunction::ResetJumpToFollowNodeCounter()

// Increment counter, or insert count of 1 if none found
void SMPFunction::IncrementJumpToFollowNodeCounter(STARS_ea_t InstAddr) {
	// operator[] value-initializes a missing counter to zero, so the first
	//  increment yields 1, matching the old find-then-insert logic.
	++(this->JumpToFollowNodeCounterMap[InstAddr]);
	return;
}

// Add a caller to the list of all callers of this function.
void SMPFunction::AddCallSource(STARS_ea_t addr) {
	// Map the call instruction address back to the caller's entry point.
	STARS_Function_t *CallerFunc = SMP_get_func(addr);
	if (nullptr == CallerFunc) {
		SMP_msg("SERIOUS WARNING: Call location %llx not in a function.\n", (unsigned long long) addr);
		return;
	}
	STARS_ea_t CallerEntryAddr = CallerFunc->get_startEA();
	assert(STARS_BADADDR != CallerEntryAddr);
	// Record both the caller function and the specific call site.
	this->AllCallSources.insert(CallerEntryAddr);
	this->AllCallSites.insert(addr);
	return;
} // end of SMPFunction::AddCallSource()

// Add a direct call target; return true if new target, false if target already added
bool SMPFunction::AddDirectCallTarget(STARS_ea_t addr) {
	assert(STARS_BADADDR != addr);
	bool NewTarget = this->DirectCallTargets.insert(addr).second;
	if (NewTarget) { // first time we have seen this direct target
		this->AllCallTargets.push_back(addr);
	}
	return NewTarget;
}

// Add an indirect call target; return true if new target, false if target already added
bool SMPFunction::AddIndirectCallTarget(STARS_ea_t addr) {
	this->SetHasIndirectCalls();
	assert(STARS_BADADDR != addr);
	bool NewTarget = this->IndirectCallTargets.insert(addr).second;
	if (NewTarget) { // first time we have seen this indirect target
		this->AllCallTargets.push_back(addr);
	}
	return NewTarget;
}

// Remove TargetAddr from DirectCallTargets and AllCallTargets.
//  Returns the iterator following the erased element, or the find() result
//  (end()) when TargetAddr was not present.
set<STARS_ea_t>::iterator SMPFunction::RemoveDirectCallTarget(STARS_ea_t TargetAddr) {
	set<STARS_ea_t>::iterator FoundIter = this->DirectCallTargets.find(TargetAddr);
	set<STARS_ea_t>::iterator NextIter = FoundIter;
	if (FoundIter != this->DirectCallTargets.end()) {
		NextIter = this->DirectCallTargets.erase(FoundIter); // erase() returns the successor
		this->RebuildCallTargets(); // remove from AllCallTargets
	}
	return NextIter;
}

// Remove TargetAddr from IndirectCallTargets and AllCallTargets.
//  Returns true if TargetAddr was present and erased.
bool SMPFunction::RemoveIndirectCallTarget(STARS_ea_t TargetAddr) {
	// Key-based erase() returns the number of elements removed (0 or 1 for a set).
	bool RemovedTarget = (0 != this->IndirectCallTargets.erase(TargetAddr));
	if (RemovedTarget) {
		this->RebuildCallTargets(); // remove from AllCallTargets
	}
	return RemovedTarget;
}

// Add to map of CallAddr => set of arg-passing definitions.
//  InsertVal pairs the call site address with the address at which the
//  argument was defined; the defining instruction's first non-flags DEF
//  operand is recorded along with that address.
void SMPFunction::AddBufferCallWithInArg(const CallAddrArgDefAddrPair InsertVal) {
	STARS_ea_t CallAddr = InsertVal.first;
	STARS_ea_t ArgAddr = InsertVal.second;

	// Get the argument operand via the ArgAddr.
	SMPInstr *ArgDefInst = this->GetInstFromAddr(ArgAddr);
	assert(nullptr != ArgDefInst);
	STARSDefUseIter ArgDefIter = ArgDefInst->GetFirstNonFlagsDef();
	assert(ArgDefIter != ArgDefInst->GetLastDef());
	STARSOpndTypePtr ArgOp = ArgDefIter->GetOp();

	// Create the arg definition pair.
	STARSDefinition ArgDefn(ArgOp, ArgAddr);

	// operator[] default-constructs an empty set on first use of CallAddr, and
	//  set insertion is idempotent, so a single statement replaces the old
	//  find-then-insert logic (which also shadowed the InsertVal parameter
	//  with a local of the same name).
	(void) this->BufferCallInArgsMap[CallAddr].insert(ArgDefn);
	return;
} // end of SMPFunction::AddBufferCallWithInArg()

// add map entry to LeaInstOpMap; replace the stored operand if addr is already mapped
void SMPFunction::AddLeaOperand(STARS_ea_t addr, STARSOpndTypePtr LeaOperand) {
	pair<STARS_ea_t, STARSOpndTypePtr> InsertValue(addr, LeaOperand);
	pair<map<STARS_ea_t, STARSOpndTypePtr>::iterator, bool> InsertResult;
	InsertResult = this->LeaInstOpMap.insert(InsertValue);
	if (!(InsertResult.second)) { // already existed; replace
		// insert() already returned an iterator to the existing entry, so update
		//  through it directly instead of performing a redundant second find().
		InsertResult.first->second = LeaOperand;
	}
	return;
}

// Add an entry to the NormalizedStackOpsMap: map <OldOp, InstAddr> (the un-normalized
//  stack operand as it appears in the RTL at InstAddr) to NormalizedOp (the operand
//  expressed relative to the incoming stack pointer). For functions that call alloca()
//  or push after frame allocation, an operand can be normalized more than once, so an
//  inverse map is maintained to patch the original A->B entry into A->C in place
//  rather than chaining mappings.
void SMPFunction::AddNormalizedStackOperand(STARSOpndTypePtr OldOp, STARS_ea_t InstAddr, STARSOpndTypePtr NormalizedOp) {
	bool DuplicateCase = false; // e.g. inc [esp+8] will have [esp+8] as a DEF and a USE and maps will see [esp+8] twice
#if SMP_DEBUG_DATAFLOW_VERBOSE
	bool DebugFlag = (InstAddr == 0x8048463);
#endif
	pair<map<STARSDefinition, STARSOpndTypePtr, LessDefinition>::iterator, bool> InsertResult;
	pair<map<STARSDefinition, map<STARSDefinition, STARSOpndTypePtr, LessDefinition>::iterator, LessDefinition>::iterator, bool> InverseInsertResult;
	pair<STARSOpndTypePtr, STARS_ea_t> OldValue(OldOp, InstAddr);
	pair<STARSOpndTypePtr, STARS_ea_t> InverseValue(OldOp, InstAddr); // OldOp was NormalizedOp when it was inserted previously
	pair<pair<STARSOpndTypePtr, STARS_ea_t>, STARSOpndTypePtr> InsertValue(OldValue, NormalizedOp);
	pair<STARSOpndTypePtr, STARS_ea_t> InverseInsertValue(NormalizedOp, InstAddr);
	// OldIter is seeded with begin() only as a valid placeholder; it is reassigned before use.
	map<STARSDefinition, STARSOpndTypePtr, LessDefinition>::iterator OldIter = this->NormalizedStackOpsMap.begin();
	pair<STARSDefinition, map<pair<STARSOpndTypePtr, STARS_ea_t>, STARSOpndTypePtr, LessDefinition>::iterator> InverseInsertTriple(InverseInsertValue, OldIter);
	map<STARSDefinition, map<pair<STARSOpndTypePtr, STARS_ea_t>, STARSOpndTypePtr, LessDefinition>::iterator>::iterator InverseIter;

	// If this function calls alloca(), stack operands could be normalized more than once.
	//  Before we proceed, we update an old entry instead of inserting a new entry.
	if (this->CallsAlloca || this->HasPushAfterFrameAlloc()) {
		InverseIter = this->InverseNormalizedStackOpsMap.find(InverseValue);
		if (InverseIter != this->InverseNormalizedStackOpsMap.end()) {
			// We have our alloca() update case. We formerly mapped <A, InstAddr> to B.
			//  Now B is being normalized to C. All we want to do is change the original
			//  map entry so that we map <A, InstAddr> to C. In this manner, A is always the
			//  original un-normalized stack op, available for lookup from an RTL.
			OldIter = InverseIter->second; // OldIter points at map of <A, InstAddr> to B.
			OldIter->second = NormalizedOp; // Change B to C
			// Now we want to erase the Inverse map entry and insert a new one that maps
			//  <C, InstAddr> to OldIter instead of mapping <B, InstAddr> to OldIter.
			(void) this->InverseNormalizedStackOpsMap.erase(InverseIter);
			InverseInsertTriple.second = OldIter;
			InverseInsertResult = this->InverseNormalizedStackOpsMap.insert(InverseInsertTriple);
			assert(InverseInsertResult.second);
			return;
		}
		else {
			// We might have the final difficult case: We have a combination of CallsAlloca and the
			//  DuplicateCase described below (e.g. an increment of a stack location produces a DEF
			//  and a USE of the same location, causing duplicate mappings to be attempted). We need
			//  to detect the duplicate case here. What will happen is that, on the first call to this
			//  method, we will map <A, InstAddr> to B, and reverse-map <B, InstAddr> to A. On the second
			//  call to this method, we will detect the duplicate case and exit. On the third call, caused
			//  by CallsAlloca, we are asked to map <B, InstAddr> to C, and we will correctly hit the code
			//  just above, in the if-clause, to fix the A->B mapping to be an A->C mapping, and we will
			//  erase the reverse mapping of B->A and replace it with the C->A reverse mapping. On the
			//  fourth call to this method, we will not find a reverse mapping B->A any more, so the if-clause
			//  does not execute. We can only detect this case by finding an existing C->A reverse mapping
			//  and an existing A->C mapping to confirm our inference.
			pair<STARSOpndTypePtr, STARS_ea_t> TestInverseValue(NormalizedOp, InstAddr);
			InverseIter = this->InverseNormalizedStackOpsMap.find(TestInverseValue);
			if (InverseIter != this->InverseNormalizedStackOpsMap.end()) {
				// Found existing C->A inverse mapping. Is there an A->C mapping to confirm
				//  our interpretation of the situation?
				pair<STARSOpndTypePtr, STARS_ea_t> TestOldValue(InverseIter->second->first.first, InstAddr);
				map<STARSDefinition, STARSOpndTypePtr, LessDefinition>::iterator TestOldIter;
				TestOldIter = this->NormalizedStackOpsMap.find(TestOldValue);
				if (TestOldIter != this->NormalizedStackOpsMap.end()) {
					// We found a mapping from <A, InstAddr>.
					if (IsEqOp(NormalizedOp, TestOldIter->second)) {
						// The mapping is A->C as suspected.
						return; // duplication; nothing to do in either map.
					}
				}
			}
		}
	}
	// At this point, we have no inverse map entry to worry about, because we are
	//  normalizing this operand for the first time.
	InsertResult = this->NormalizedStackOpsMap.insert(InsertValue);
	OldIter = InsertResult.first;
	if (!(InsertResult.second)) {
		// Already had an entry. That should mean a rare case such as "inc [esp+8]" which
		//  produces a USE and a DEF of the same address. We can confirm that the map has
		//  the same normalized operand we were trying to insert. Otherwise, the collision
		//  is fatal.
		STARSOpndTypePtr OldOldOp = InsertResult.first->first.first;
		STARSOpndTypePtr OldNormalizedOp = InsertResult.first->second;
		assert(IsEqOp(OldOldOp, OldOp) && IsEqOp(OldNormalizedOp, NormalizedOp));
		DuplicateCase = true;
	}
	if (this->CallsAlloca || this->HasPushAfterFrameAlloc()) {
		// We need to add an entry to the inverse map.
		InverseInsertTriple.second = OldIter;
		InverseInsertResult = this->InverseNormalizedStackOpsMap.insert(InverseInsertTriple);
		assert(InverseInsertResult.second || DuplicateCase);
	}
#if SMP_DEBUG_DATAFLOW_VERBOSE
	if (DebugFlag) {
		map<STARSDefinition, STARSOpndTypePtr, LessDefinition>::iterator StackMapIter;
		SMP_msg("DEBUG: NormalizedStackOpsMap size: %zd\n", this->NormalizedStackOpsMap.size());
		for (StackMapIter = this->NormalizedStackOpsMap.begin(); StackMapIter != this->NormalizedStackOpsMap.end(); ++ StackMapIter) {
			STARSOpndTypePtr OldOp = StackMapIter->first.first;
			STARS_ea_t InstAddr = StackMapIter->first.second;
			SMP_msg("DEBUG: NormalizedStackOps: ");
			PrintOperand(OldOp);
			SMP_msg(" addr: %llx\n", (unsigned long long) InstAddr);
		}
	}
#endif
	return;
} // SMPFunction::AddNormalizedStackOperand()

// Update MaxDirectStackAccessDelta
//  Raises the recorded high-water mark when NewOffset exceeds it; never lowers it.
void SMPFunction::UpdateMaxDirectStackAccessOffset(STARS_sval_t NewOffset) {
	if (this->MaxDirectStackAccessDelta < NewOffset) {
		this->MaxDirectStackAccessDelta = NewOffset;
	}
	return;
}


// insert jump type into ControlFlowMap
void SMPFunction::SetControlFlowType(STARS_ea_t InstAddr, ControlFlowType JumpTypeCode) {
	map<STARS_ea_t, unsigned short>::iterator MapIter = this->ControlFlowMap.find(InstAddr);
	if (MapIter == this->ControlFlowMap.end()) { // no old entry; insert
		pair<STARS_ea_t, unsigned short> InsertPair(InstAddr, (unsigned short) JumpTypeCode);
		pair<map<STARS_ea_t, unsigned short>::iterator, bool> InsertResult = this->ControlFlowMap.insert(InsertPair);
		assert(InsertResult.second);
		MapIter = InsertResult.first;
	}
	else { // old entry found; update
		// We permit a LOOP_EXIT to become a SHORT_CIRCUIT_LOOP_EXIT
		bool ExitCase = (SHORT_CIRCUIT_LOOP_EXIT == JumpTypeCode) && (LOOP_EXIT == MapIter->second);
		if (ExitCase) {
			MapIter->second = (unsigned short) JumpTypeCode;
		}
		else if (MapIter->second != ((unsigned short) JumpTypeCode)) {
			SMP_msg("WARNING: SPARK Ada jump type changing from %u to %u at %llx: rejected\n", MapIter->second,
				(unsigned short) JumpTypeCode, (unsigned long long) InstAddr);
#if 0  // refuse to override past value
			MapIter->second = (unsigned short) JumpTypeCode;
#endif
		}
	}
	return;
} // end of SMPFunction::SetControlFlowType()

// Insert SCCP value for global name; change old entry if already found.
//  Returns an iterator to the (inserted or updated) map entry.
STARSSCCPMapIter SMPFunction::InsertGlobalConstValue(int DefHashValue, struct STARS_SCCP_Const_Struct NewConstEntry) {
#if STARS_DEBUG_FUNC_SCCP_VERBOSE
	if (0x807a530 == this->FirstEA) {
		SMP_msg("DEBUG: SCCP: Inserting for DefHashValue %x Type: %d Value: %llx\n",
			DefHashValue, (int) NewConstEntry.ConstType, (uint64_t) NewConstEntry.ConstValue);
	}
#endif
	STARSSCCPMapIter FindIter = this->FindConstValue(DefHashValue);
	if (FindIter != this->GetLastConstValueIter()) {
		// Existing entry: overwrite with the new lattice value.
		FindIter->second = NewConstEntry;
		return FindIter;
	}
	// No prior entry for this hash: insert a fresh one.
	pair<int, struct STARS_SCCP_Const_Struct> InsertPair(DefHashValue, NewConstEntry);
	pair<STARSSCCPMapIter, bool> InsertResult = this->ConstantDefs.insert(InsertPair);
	assert(InsertResult.second);
	return InsertResult.first;
} // end of SMPFunction::InsertGlobalConstValue()


// Return RTLop if not stack opnd; return normalized RTLop otherwise.
//  The normalized form must already have been recorded in NormalizedStackOpsMap
//  for the (operand, InstAddr) pair, else we assert.
STARSOpndTypePtr SMPFunction::GetNormalizedOperand(STARS_ea_t InstAddr, const STARSOpndTypePtr &RTLop) {
#if SMP_DEBUG_DATAFLOW_VERBOSE
	bool DebugFlag = (0x8048463 == InstAddr);
	if (DebugFlag) {
		map<STARSDefinition, STARSOpndTypePtr, LessDefinition>::iterator StackMapIter;
		SMP_msg("DEBUG: NormalizedStackOpsMap size: %zd\n", this->NormalizedStackOpsMap.size());
		for (StackMapIter = this->NormalizedStackOpsMap.begin(); StackMapIter != this->NormalizedStackOpsMap.end(); ++ StackMapIter) {
			STARSOpndTypePtr OldOp = StackMapIter->first.first;
			STARS_ea_t InstAddr = StackMapIter->first.second;
			SMP_msg("DEBUG: NormalizedStackOps: ");
			PrintOperand(OldOp);
			SMP_msg(" addr: %llx\n", (unsigned long long) InstAddr);
		}
		SMP_msg(" RTLop to search for: ");
		PrintOperand(RTLop);
	}
#endif
	if (!MDIsStackAccessOpnd(RTLop, this->UsesFramePointer())) {
		return RTLop; // non-stack operands pass through unchanged
	}
	// Stack operand: look up the normalized operand recorded for this def/use site.
	pair<STARSOpndTypePtr, STARS_ea_t> DefnKey(RTLop, InstAddr);
	map<STARSDefinition, STARSOpndTypePtr, LessDefinition>::iterator NormIter = this->NormalizedStackOpsMap.find(DefnKey);
	assert(this->NormalizedStackOpsMap.end() != NormIter);
	return NormIter->second;
} // end of SMPFunction::GetNormalizedOperand()

// Get reg type from all call sites; compute if called for the first time.
//  Returns UNINIT when RegNum has no entry in the IncomingRegTypes vector.
SMPOperandType SMPFunction::GetIncomingRegType(STARS_regnum_t RegNum) {
	if ((STARS_regnum_t) this->IncomingRegTypes.size() <= RegNum) {
		return UNINIT; // no recorded incoming type for this register
	}
	return this->IncomingRegTypes[RegNum];
} // end of SMPFunction::GetIncomingRegType()

// return true if LiveInOp found in MarkerInst DEFs; on success, MarkerDefType
//  receives the operand type recorded on that DEF.
bool SMPFunction::GetMarkerInstDefType(STARSOpndTypePtr &LiveInOp, SMPOperandType &MarkerDefType) {
	if (!this->IsLiveIn(LiveInOp))
		return false; // operand is not live into the function
	SMPInstr *FirstInst = this->Instrs.front();
	if (!FirstInst->IsMarkerInst())
		return false; // first instruction is not the SSA marker pseudo-inst
	STARSDefUseIter DefIter = FirstInst->FindDef(LiveInOp);
	if (DefIter == FirstInst->GetLastDef())
		return false; // marker inst has no DEF for this operand
	MarkerDefType = DefIter->GetType();
	return true;
} // end of SMPFunction::GetMarkerInstDefType()

// Eight methods to set values into the maps of global reg/stack/SSA to FG info.
//  For local names, see corresponding methods in SMPBasicBlock.
void SMPFunction::UpdateDefSignMiscInfo(int DefHashValue, unsigned short NewInfo) {
	map<int, struct FineGrainedInfo>::iterator MapIter;
	pair<map<int, struct FineGrainedInfo>::iterator, bool> MapResult;

	MapIter = this->GlobalDefFGInfoBySSA.find(DefHashValue);
	if (MapIter == this->GlobalDefFGInfoBySSA.end()) {
		// Not found; insert first.
		struct FineGrainedInfo NewFGInfo;
		NewFGInfo.SignMiscInfo = NewInfo;
		NewFGInfo.SizeInfo = 0;
		pair<int, struct FineGrainedInfo> MapItem(DefHashValue, NewFGInfo);
		MapResult = this->GlobalDefFGInfoBySSA.insert(MapItem);
		assert(MapResult.second); // Was not previously found, insertion must work.
	}
	else { // found; just OR in the new bits.
		MapIter->second.SignMiscInfo |= NewInfo;
	}

	return;
} // end of SMPFunction::UpdateDefSignMiscInfo()

// OR NewInfo sign/misc bits into the stack DEF fine-grained info at InstAddr.
//  The entry must already exist (created during stack frame analysis).
void SMPFunction::UpdateStackDefSignMiscInfo(STARS_ea_t InstAddr, unsigned short NewInfo) {
	map<STARS_ea_t, struct FineGrainedInfo>::iterator FGIter = this->StackDefFGInfo.find(InstAddr);
	assert(this->StackDefFGInfo.end() != FGIter);
	FGIter->second.SignMiscInfo |= NewInfo;
	return;
} // end of SMPFunction::UpdateStackDefSignMiscInfo()

void SMPFunction::UpdateUseSignMiscInfo(int UseHashValue, unsigned short NewInfo) {
	map<int, struct FineGrainedInfo>::iterator MapIter;
	pair<map<int, struct FineGrainedInfo>::iterator, bool> MapResult;

	MapIter = this->GlobalUseFGInfoBySSA.find(UseHashValue);
	if (MapIter == this->GlobalUseFGInfoBySSA.end()) {
		// Not found; insert first.
		struct FineGrainedInfo NewFGInfo;
		NewFGInfo.SignMiscInfo = NewInfo;
		NewFGInfo.SizeInfo = 0;
		pair<int, struct FineGrainedInfo> MapItem(UseHashValue, NewFGInfo);
		MapResult = this->GlobalUseFGInfoBySSA.insert(MapItem);
		assert(MapResult.second); // Was not previously found, insertion must work.
	}
	else { // found; just OR in the new bits.
		MapIter->second.SignMiscInfo |= NewInfo;
	}

	return;
} // end of SMPFunction::UpdateUseSignMiscInfo()

// OR NewInfo sign/misc bits into the stack USE fine-grained info at InstAddr.
//  The entry must already exist (created during stack frame analysis).
void SMPFunction::UpdateStackUseSignMiscInfo(STARS_ea_t InstAddr, unsigned short NewInfo) {
	map<STARS_ea_t, struct FineGrainedInfo>::iterator FGIter = this->StackUseFGInfo.find(InstAddr);
	assert(this->StackUseFGInfo.end() != FGIter);
	FGIter->second.SignMiscInfo |= NewInfo;
	return;
} // end of SMPFunction::UpdateStackUseSignMiscInfo()

void SMPFunction::UpdateDefWidthTypeInfo(int DefHashValue, unsigned short NewInfo) {
	map<int, struct FineGrainedInfo>::iterator MapIter;
	pair<map<int, struct FineGrainedInfo>::iterator, bool> MapResult;

	MapIter = this->GlobalDefFGInfoBySSA.find(DefHashValue);
	if (MapIter == this->GlobalDefFGInfoBySSA.end()) {
		// Not found; insert first.
		struct FineGrainedInfo NewFGInfo;
		NewFGInfo.SignMiscInfo = 0;
		NewFGInfo.SizeInfo = NewInfo;
		pair<int, struct FineGrainedInfo> MapItem(DefHashValue, NewFGInfo);
		MapResult = this->GlobalDefFGInfoBySSA.insert(MapItem);
		assert(MapResult.second); // Was not previously found, insertion must work.
	}
	else { // found; just OR in the new bits.
		MapIter->second.SizeInfo |= NewInfo;
	}

	return;
} // end of SMPFunction::UpdateDefWidthTypeInfo()

void SMPFunction::UpdateUseWidthTypeInfo(int UseHashValue, unsigned short NewInfo) {
	map<int, struct FineGrainedInfo>::iterator MapIter;
	pair<map<int, struct FineGrainedInfo>::iterator, bool> MapResult;

	MapIter = this->GlobalUseFGInfoBySSA.find(UseHashValue);
	if (MapIter == this->GlobalUseFGInfoBySSA.end()) {
		// Not found; insert first.
		struct FineGrainedInfo NewFGInfo;
		NewFGInfo.SignMiscInfo = 0;
		NewFGInfo.SizeInfo = NewInfo;
		pair<int, struct FineGrainedInfo> MapItem(UseHashValue, NewFGInfo);
		MapResult = this->GlobalUseFGInfoBySSA.insert(MapItem);
		assert(MapResult.second); // Was not previously found, insertion must work.
	}
	else { // found; just OR in the new bits.
		MapIter->second.SizeInfo |= NewInfo;
	}

	return;
} // end of SMPFunction::UpdateUseWidthTypeInfo()

void SMPFunction::UpdateDefFGInfo(int DefHashValue, struct FineGrainedInfo NewFG) {
	map<int, struct FineGrainedInfo>::iterator MapIter;
	pair<map<int, struct FineGrainedInfo>::iterator, bool> MapResult;

	MapIter = this->GlobalDefFGInfoBySSA.find(DefHashValue);
	if (MapIter == this->GlobalDefFGInfoBySSA.end()) {
		// Not found; insert it.
		pair<int, struct FineGrainedInfo> MapItem(DefHashValue, NewFG);
		MapResult = this->GlobalDefFGInfoBySSA.insert(MapItem);
		assert(MapResult.second); // Was not previously found, insertion must work.
	}
	else { // found; just put in the new bits.
		MapIter->second.SignMiscInfo |= NewFG.SignMiscInfo;
		MapIter->second.SizeInfo |= NewFG.SizeInfo;
	}

	return;
} // end of SMPFunction::UpdateDefFGInfo()

void SMPFunction::UpdateUseFGInfo(int UseHashValue, struct FineGrainedInfo NewFG) {
	map<int, struct FineGrainedInfo>::iterator MapIter;
	pair<map<int, struct FineGrainedInfo>::iterator, bool> MapResult;

	MapIter = this->GlobalUseFGInfoBySSA.find(UseHashValue);
	if (MapIter == this->GlobalUseFGInfoBySSA.end()) {
		// Not found; insert it.
		pair<int, struct FineGrainedInfo> MapItem(UseHashValue, NewFG);
		MapResult = this->GlobalUseFGInfoBySSA.insert(MapItem);
		assert(MapResult.second); // Was not previously found, insertion must work.
	}
	else { // found; just put in the new bits.
		MapIter->second.SignMiscInfo |= NewFG.SignMiscInfo;
		MapIter->second.SizeInfo |= NewFG.SizeInfo;
	}

	return;
} // end of SMPFunction::UpdateUseFGInfo()

// Reset the signedness bits to zero for DEF.
void SMPFunction::ClearDefSignedness(int DefHashValue) {
	map<int, struct FineGrainedInfo>::iterator MapIter;

	MapIter = this->GlobalDefFGInfoBySSA.find(DefHashValue);
	if (MapIter != this->GlobalDefFGInfoBySSA.end()) {
		MapIter->second.SignMiscInfo &= (~FG_MASK_SIGNEDNESS_BITS);
	}
	return;
} // end of SMPFunction::ClearDefSignedness()

// Erase a range of instructions from the Instrs list, usually corresponding
//  to the range of a basic block. Marker pseudo-instructions are skipped, not
//  erased. Asserts if either FirstAddr or LastAddr is not found in order.
void SMPFunction::EraseInstRange(STARS_ea_t FirstAddr, STARS_ea_t LastAddr) {
	bool SawFirst = false;
	bool SawLast = false;
	list<SMPInstr *>::iterator InstIter = this->Instrs.begin();
	while (InstIter != this->Instrs.end()) {
		SMPInstr *CurrInst = (*InstIter);
		STARS_ea_t CurrAddr = CurrInst->GetAddr();
		if (CurrAddr == FirstAddr) {
			SawFirst = true; // start erasing from here on
		}
		if (SawFirst && (!CurrInst->IsMarkerInst())) {
			InstIter = this->Instrs.erase(InstIter);
			if (CurrAddr == LastAddr) {
				SawLast = true; // erased the final instruction in the range
				break;
			}
		}
		else {
			++InstIter;
		}
	}
	assert(SawFirst);
	assert(SawLast);
	return;
} // end of SMPFunction::EraseInstRange()

// For instruction address UseAddr, compute the reaching defs for operand TempOp,
//  placing them into the TempReachingDefs list.
void SMPFunction::ComputeTempReachingDefs(const STARSOpndTypePtr &TempOp, STARS_ea_t UseAddr) {
	this->TempReachingDefs.clear();
	SMPBasicBlock *CurrBlock = this->GetBlockFromInstAddr(UseAddr);
	assert(NULL != CurrBlock);
	STARSDefinitionSet::iterator ReachesInIter;
	pair<set<STARS_ea_t, LessAddr>::iterator, bool> InsertResult;

	// Start with the matching members of the ReachesIn set for the current basic block.
	for (ReachesInIter = CurrBlock->GetFirstReachesIn(); ReachesInIter != CurrBlock->GetLastReachesIn(); ++ReachesInIter) {
		pair<STARSOpndTypePtr, STARS_ea_t> ReachesInDef = *ReachesInIter;
		if (IsEqOp(TempOp, ReachesInDef.first)) {
			InsertResult = this->TempReachingDefs.insert(ReachesInDef.second);
			assert(InsertResult.second);
		}
	}

	// Now, see if any def in the block hides the ReachesIn defs before we get to UseAddr.
	vector<SMPInstr *>::iterator InstIter;
	for (InstIter = CurrBlock->GetFirstInst(); InstIter != CurrBlock->GetLastInst(); ++InstIter) {
		SMPInstr *CurrInst = *InstIter;
		STARS_ea_t InstAddr = CurrInst->GetAddr();
		if (CurrInst->IsMarkerInst())
			continue;
		if (InstAddr >= UseAddr)
			break;
		set<DefOrUse, LessDefUse>::iterator DefIter = CurrInst->FindDef(TempOp);
		if (DefIter != CurrInst->GetLastDef()) {
			// Found a def. All previous defs of TempOp are hidden from UseAddr by this def.
			this->TempReachingDefs.clear();
			InsertResult = this->TempReachingDefs.insert(InstAddr);
			assert(InsertResult.second);
		}
	}
	return;
} // end of SMPFunction::ComputeTempReachingDefs()

// Find all the saved stack deltas (if any) for the def addrs in the TempReachesDefs list for TempOp.
//  Put the entries matching TempOp into TempStackDeltaReachesList.
void SMPFunction::ComputeTempStackDeltaReachesList(const STARSOpndTypePtr &TempOp) {
	bool FoundOperand = false;
	set<pair<STARSOpndTypePtr, pair<STARS_ea_t, STARS_sval_t> >, LessStackDeltaCopy>::iterator CopyIter;
	this->TempStackDeltaReachesList.clear();
	for (CopyIter = this->StackPtrCopySet.begin(); CopyIter != this->StackPtrCopySet.end(); ++CopyIter) {
		pair<STARSOpndTypePtr, pair<STARS_ea_t, STARS_sval_t> > CopyEntry = *CopyIter;
		if (IsEqOp(TempOp, CopyEntry.first)) {
			set<STARS_ea_t, LessAddr>::iterator FindReachDefIter;
			FoundOperand = true; // help us save time later by exiting loop
			// Match address at which stack ptr copy was made to a reaching def address for TempOp.
			FindReachDefIter = this->TempReachingDefs.find(CopyEntry.second.first);
			if (FindReachDefIter != this->TempReachingDefs.end()) {
				// Found a StackPtrCopySet entry for TempOp, AND we found the DefAddr
				//  in the TempReachingDefs set. 
				this->TempStackDeltaReachesList.push_back(CopyEntry.second); // push back a pair<ea_t, STARS_sval_t>
			}
		}
		else if (FoundOperand) {
			// We have found the operand, but have now moved past it in the iteration of StackPtrCopySet.
			//  Save time by exiting the loop.
			break;
		}
	}
	return;
} // end of SMPFunction::ComputeTempStackDeltaReachesList()

// Find the largest stack delta in the TempStackDeltaReachesList and return it
//  through StackDelta. Return true if only one distinct value was found; on an
//  empty list, StackDelta is set to 0 and false is returned.
bool SMPFunction::FindReachingStackDelta(STARS_sval_t &StackDelta) {
	if (this->TempStackDeltaReachesList.empty()) {
		StackDelta = 0;
		return false;
	}

	StackDelta = this->TempStackDeltaReachesList.front().second;
	bool UniqueDelta = true;
	list<pair<STARS_ea_t, STARS_sval_t> >::iterator DeltaIter;
	for (DeltaIter = this->TempStackDeltaReachesList.begin(); DeltaIter != this->TempStackDeltaReachesList.end(); ++DeltaIter) {
		STARS_sval_t CandidateDelta = DeltaIter->second;
		if (CandidateDelta != StackDelta) {
			UniqueDelta = false; // saw at least two distinct values
			if (StackDelta < CandidateDelta) {
				StackDelta = CandidateDelta; // keep the maximum
			}
		}
	}
	return UniqueDelta;
} // end of SMPFunction::FindReachingStackDelta()

// Find any apparent stack adjustment after the call instruction at CallAddr,
//  confining our search to the basic block containing CallAddr. Returns zero
//  when the block analysis detects no adjustment.
STARS_sval_t SMPFunction::GetStackAdjustmentForCallee(STARS_ea_t CallAddr) {
	SMPBasicBlock *CallBlock = this->GetBlockFromInstAddr(CallAddr);
	assert(NULL != CallBlock);
	STARS_sval_t CalleeAdjustment = 0;
	STARS_sval_t BlockDelta = CallBlock->ComputeStackAdjustmentAfterCall(CallAddr);
	if (BlockDelta != 0) {
		CalleeAdjustment = BlockDelta;
		SMP_msg("INFO: Block analysis produced callee adjustment of %ld bytes after %llx\n", 
			(long) CalleeAdjustment, (unsigned long long) CallAddr);
	}
	return CalleeAdjustment;
} // end of SMPFunction::GetStackAdjustmentForCallee()

// Get stack delta from a callee function that is unable to provide the info from
//  its own analyses (e.g. analyses failed or have not been performed yet, due to
//  a mutually recursive clique in the call graph). We have three approaches in
//  this case: Use a default value, consult IDA Pro's analyses, or see if we can
//  detect a stack adjustment after the call instruction, from which we could infer
//  the stack delta of the callee. We choose the latter approach, and find the smallest
//  adjustment among all call sites for the callee.
STARS_sval_t SMPFunction::GetStackDeltaForCallee(STARS_ea_t CallTargetAddr) {
	// Start from the calling-convention default; refine if adjustment evidence exists.
	STARS_sval_t CalleeDelta = CALLING_CONVENTION_DEFAULT_FUNCTION_STACK_DELTA;
	SMPFunction *Callee = this->GetProg()->FindFunction(CallTargetAddr);
	if (Callee != nullptr) {
		STARS_sval_t Adjustment = Callee->ComputeGlobalStackAdjustment();
		if (Adjustment != 0) {
			CalleeDelta -= Adjustment;
			SMP_msg("INFO: Global stack adjustment analysis produced callee delta of %ld bytes after %llx\n",
				(long) CalleeDelta, (unsigned long long) CallTargetAddr);
		}
	}
	return CalleeDelta;
} // end of SMPFunction::GetStackDeltaForCallee()

// Compute a consistent (or smallest) stack adjustment seen program-wide after all calls to the current function.
//  Do not return a non-zero value unless more than one call site can be used as evidence.
//  A zero adjustment at any call site, or a mix of positive and negative adjustments,
//  invalidates the inference and yields zero. The result is cached.
STARS_sval_t SMPFunction::ComputeGlobalStackAdjustment(void) {
	// Return the cached value if this analysis has already run.
	if (this->StackAdjustmentComputed) {
		return this->GlobalStackAdjustment;
	}

	const STARS_sval_t NegSentinel = -10000; // "no negative adjustment seen yet"
	const STARS_sval_t PosSentinel = 10000;  // "no positive adjustment seen yet"
	STARS_sval_t SmallestPositive = PosSentinel;
	STARS_sval_t LargestNegative = NegSentinel;
	bool FoundZeroAdjustment = false;

	// With only one call site, it is dangerous to draw conclusions about seeming "adjustments."
	if (this->AllCallSites.size() > 1) {
		set<STARS_ea_t>::iterator CallSiteIter;
		for (CallSiteIter = this->AllCallSites.begin(); CallSiteIter != this->AllCallSites.end(); ++CallSiteIter) {
			STARS_ea_t CallSiteAddr = (*CallSiteIter);
			STARS_Function_t *CallerIDAFunc = SMP_get_func(CallSiteAddr);
			assert(NULL != CallerIDAFunc);
			SMPFunction *CallerFunc = this->GetProg()->FindFunction(CallerIDAFunc->get_startEA());
			assert(nullptr != CallerFunc);
			STARS_sval_t SiteAdjustment = CallerFunc->GetStackAdjustmentForCallee(CallSiteAddr);
			if (0 == SiteAdjustment) {
				// Any zero adjustment found invalidates non-zero inferences.
				FoundZeroAdjustment = true;
				break;
			}
			else if (0 < SiteAdjustment) {
				// Track the smallest positive adjustment seen.
				if (SiteAdjustment < SmallestPositive) {
					SmallestPositive = SiteAdjustment;
				}
			}
			else if (SiteAdjustment > LargestNegative) {
				// Track the negative adjustment closest to zero.
				LargestNegative = SiteAdjustment;
			}
		}
	}

	// Accept the evidence only when all observed adjustments agree in sign.
	STARS_sval_t GlobalAdjustment = 0;
	if (!FoundZeroAdjustment) {
		bool SawPositive = (SmallestPositive < PosSentinel);
		bool SawNegative = (LargestNegative > NegSentinel);
		if (SawPositive && (!SawNegative)) {
			GlobalAdjustment = SmallestPositive;
		}
		else if (SawNegative && (!SawPositive)) {
			GlobalAdjustment = LargestNegative;
		}
		// Mixed signs, or no adjustments at all: GlobalAdjustment stays zero.
	}

	this->StackAdjustmentComputed = true; // signal caching of the value for future speed
	this->GlobalStackAdjustment = GlobalAdjustment; // cache the value
	return GlobalAdjustment;
} // end of SMPFunction::ComputeGlobalStackAdjustment()

// Use IDA Pro stack pointer deltas instead of doing our own analysis.
//  Copies IDA's per-instruction stack delta onto each instruction; always returns true.
bool SMPFunction::UseIDAStackPointerDeltas(void) {
	bool IDATraceFlag = false;
#if SMP_COMPARE_IDA_STARS_STACK_POINTER_DELTAS
	IDATraceFlag = (0 == strcmp("do_length", this->GetFuncName()));
#endif

	list<SMPInstr *>::iterator InstIter = this->Instrs.begin();
#if SMP_USE_SSA_FNOP_MARKER
	++InstIter; // skip marker pseudo-instruction
#endif
	for ( ; InstIter != this->Instrs.end(); ++InstIter) {
		SMPInstr *CurrInst = (*InstIter);
		STARS_sval_t IDAProDelta = SMP_get_spd(this->GetFuncInfo(), CurrInst->GetAddr());
		CurrInst->SetStackPtrOffset(IDAProDelta);
		if (IDATraceFlag) {
			SMP_msg("INFO: IDA Pro stack delta trace: %ld at %llx\n", (long) IDAProDelta, (unsigned long long) CurrInst->GetAddr());
		}
	}
	return true;
} // end of SMPFunction::UseIDAStackPointerDeltas()

// Analyze changes to the stack pointer over all instructions.
bool SMPFunction::AnalyzeStackPointerDeltas(void) {
	list<pair<SMPBasicBlock *, STARS_sval_t> > WorkList;
	vector<SMPInstr *>::iterator InstIter;
	SMPInstr *CurrInst;
	STARS_sval_t CurrentDelta = 0;
	STARS_sval_t DeltaIncrement = 0; // change when reprocessing a block in alloca()-calling function
	bool ConsistentNetDelta = true; // Net change to stack pointer is consistent at all RETURN locations
	bool ConflictingValuesSeen = false; // At least one block was entered with multiple deltas
	bool StackPointerRestoreSeen = false; // Stack pointer restored; must become true if ConflictingValuesSeen
	bool ReturnSeen = false;
	bool IDAProSucceeded = this->AnalyzedSP;
	bool DebugFlag = false;
	bool TraceFlag = false;
	bool IDATraceFlag = false;

#if SMP_COMPARE_IDA_STARS_STACK_POINTER_DELTAS
	DebugFlag = (0 == strcmp("sub_41F320", this->GetFuncName()));
	TraceFlag = (0 == strcmp("sub_41F320", this->GetFuncName()));
#endif

	if (!this->HasGoodRTLs()) {
		SMP_msg("INFO: Using IDA Pro stack pointer deltas for BADRTLS function %s .\n", this->GetFuncName());
		// (void) this->UseIDAStackPointerDeltas();
		this->AnalyzedSP = false;
		return false; // leave it unsolved
	}

#if 0
	// Temporarily pull the functions that call alloca out of the stack pointer delta computations, so
	//  that we can focus on solving other problems.
	if (this->CallsAlloca || this->HasPushAfterFrameAlloc()) {
		if (!this->AnalyzedSP) {
			(void) this->UseIDAStackPointerDeltas();
			return false; // leave it unsolved
		}
		else {
			SMP_msg("INFO: Using IDA Pro stack pointer deltas for alloca-calling function %s .\n", this->GetFuncName());
			return this->UseIDAStackPointerDeltas();
		}
	}
#endif

	// In order to precisely track stack deltas, we need to deal with instruction sequences that save the stack pointer
	//  and then restore it later. This requires a reaching definitions data flow analysis that includes, at a minimum,
	//  all stack definitions (normalized by stack delta, so that we do not confuse [esp+20] and [esp+20] where the values
	//  of esp are not the same). We also need to keep track of stack pointer saves in both registers and in stack locations.
	// In order for the information about saved stack pointer copies to be available as soon as we need them in the stack
	//  delta analysis, we have to perform both stack delta analysis and reaching definitions analysis at the same time. Luckily,
	//  both analyses are well suited to being performed as forward analyses starting from the entry basic block.
	//
	// Data structures for the reaching definitions analysis include a ReachesIn and a ReachesOut set for each basic block, a
	//  VarKill set for each block, and a DownExposedDefs set for each block. The VarKill set is shared with the later Live Variable
	//  Analysis (LVA), so we compute the VarKill and the UpExposed sets (UpExposed is only used by LVA) on the first pass through
	//  each block. The VarKill and all other LVA sets are sets of operands. The ReachesIn, ReachesOut, and DownExposedDefs sets
	//  are sets of definitions, where a definition is a pair<operand, instruction address>. The StackPtrCopySet is a triple of
	//   <operand, instruction address, stack delta>, arranged as a pair of pairs <operand, <addr, delta> >
	//
	// Algorithm: We maintain a WorkList of pairs <basic block pointer, incoming stack delta to that block>
	//
	// All sets are empty at the beginning.
	// Add the entry basic block to the WorkList, with IncomingDelta of zero.
	// while (WorkList is not empty) do
	//    de-queue first block from WorkList, obtain IncomingDelta
	//    Compute ReachesIn as the union of the ReachesOut of all predecesssor blocks
	//    if (block has not already been processed) then
	//       mark block as processed
	//       for each inst in block (forward iteration) do
	//           for each USE in inst do
	//              if USE operand not in VarKill set for block then
	//                  add USE operand to UpExposed set for block
	//              endif
	//              if USE operand is a stack pointer value AND it will produce DEF that is a stack pointer value then
	//                  if DEF is stack pointer register then  { a stack pointer value that was saved is being restored }
	//                      retrieve new stack pointer delta from saved value in StackPtrCopySet, looking it up in
	//                          that set using the reaching definitions for the USE operand. If inconsistent  ******
	//                  else { stack pointer value is being saved somewhere besides the stack pointer register }
	//                      add stack delta to StackPtrCopySet for DEF that is receiving it in current inst
	//                  endif
	//               endif
	//            endfor { each USE }
	//            for each DEF in inst do
	//               if register or stack operand then 
	//                  add to VarKill set
	//                  update DownExposedDefs set (insert, or replace current def for this operand)
	//               endif
	//            endfor { each DEF }
	//            Store IncomingDelta for current instruction
	//            Get change in delta for current instruction
	//            Add current change to IncomingDelta
	//       endfor { each inst }
	//       At end of block, make ReachesOut set be (ReachesIn minus VarKill) union DownExposedDefs
	//       For each successor block, add pairs <block pointer, IncomingDelta> to end of WorkList
	//    else { block has already been processed at least once}
	//       if IncomingDelta from WorkList is inconsistent with old IncomingDelta then
	//          if function calls alloca() then
	//             if new IncomingDelta makes stack frame look larger than old IncomingDelta then
	//                ignore new IncomingDelta and just process reaching definitions sets below
	//             else
	//                use new IncomingDelta and re-process deltas in this block to converge to
	//                 smallest stack frame, which means we are basically ignoring alloca()'s as much as possible.
	//             endif
	//          else
	//             Set AnalyzedSP to false, emit error message, clear WorkList and bail out of this function.
	//          endif
	//       endif { inconsistent IncomingDelta values }
	//       Recompute ReachesIn as union of ReachesOut of all predecessor blocks
	//       if ReachesIn set changed then
	//          recompute ReachesOut without examining instructions unless alloca() case requires iterating through instructions
	//       endif
	//       if any change in deltas or reaching definitions sets, then add block to end of WorkList along with all successor blocks.
	//    endif
	// end while

	// Mark all blocks as unprocessed
	this->ResetProcessedBlocks();

	this->AnalyzedSP = true;

	// Put the entry block on the work list.
	assert(!(this->Blocks.empty()));
	pair<SMPBasicBlock *, STARS_sval_t> WorkingPair(this->Blocks.front(), CurrentDelta);
	WorkList.push_back(WorkingPair);

	// While blocks exist on the work list
	//  if block already processed, confirm that we are re-entering
	//    the block with the same stack pointer delta as previously,
	//    and pop it off the work list
	//    otherwise declare the stack pointer to be un-analyzeable;
	//  else
	//     iterate through all instructions in the block, analyzing
	//     the stack pointer delta of each inst and accumulating current delta
	//     At the end of the block, put the successor blocks on the work list.
	// For both cases, maintain and update reaching definitions sets, and the
	//  UpExposed and VarKill sets that are used by LVA as well as reaching defs analysis.
	bool ReprocessingAllocaBlocks = false;
	bool ReachesInChanged;
	bool ReachesOutChanged = false;
	do {
		SMPBasicBlock *CurrBlock = WorkList.front().first;
		STARS_sval_t IncomingDelta = WorkList.front().second;

		if (TraceFlag) {
			SMP_msg("TRACE: Starting block with IncomingDelta of %lld\n", (long long) IncomingDelta);
		}
#if SMP_COMPARE_IDA_STARS_STACK_POINTER_DELTAS
		if (DebugFlag && IDAProSucceeded) {
			STARS_ea_t InstAddr = CurrBlock->GetFirstAddr();
			if (!STARS_IsSSAMarkerPseudoID(InstAddr)) {
				STARS_sval_t IDAProDelta = SMP_get_spd(this->GetFuncInfo(), InstAddr);
				if (IDAProDelta != IncomingDelta) {
					intmax_t IDADelta = (intmax_t) IDAProDelta, OurDelta = (intmax_t) IncomingDelta;
					if (sizeof(STARS_sval_t) == 8) {
						SMP_msg("ERROR: At %p block entry IDA Pro has stack pointer delta of %jd and we compute %jd BlockProcessed: %d\n",
							InstAddr, IDADelta, OurDelta, CurrBlock->IsProcessed());
					}
					else {
						assert(sizeof(STARS_sval_t) == 4);
						SMP_msg("ERROR: At %p block entry IDA Pro has stack pointer delta of %d and we compute %d BlockProcessed: %d\n",
							InstAddr, IDAProDelta, IncomingDelta, CurrBlock->IsProcessed());
					}
				}
			}
		}
#endif

		if (0 < IncomingDelta) {
			SMP_msg("ERROR: Stack delta of %ld implies stack underflow in func at %llx\n",
				(long) IncomingDelta, (unsigned long long) this->GetFirstFuncAddr());
			this->AnalyzedSP = false;
			WorkList.clear();
			break;
		}

		if (CurrBlock->IsProcessed()) { // already processed
			ReachesOutChanged = false;
#if 0
			ReachesInChanged = CurrBlock->ComputeReachesInSet();
			if (ReachesInChanged) {
				ReachesOutChanged = CurrBlock->ComputeReachesOutSet();
			}
#else
			if (CurrBlock->IsReachesOutStale()) {
				ReachesOutChanged = CurrBlock->ComputeReachesOutSet();
			}
#endif
			if (ReachesOutChanged) {
				// Push the successor blocks onto the work list
				STARS_sval_t SuccIncomingDelta = CurrBlock->GetOutgoingStackDelta();
				list<SMPBasicBlock *>::iterator SuccIter;
				for (SuccIter = CurrBlock->GetFirstSucc(); SuccIter != CurrBlock->GetLastSucc(); ++SuccIter) {
					pair<SMPBasicBlock *, STARS_sval_t> SuccPair (*SuccIter, SuccIncomingDelta);
					WorkList.push_back(SuccPair);
				}
			}
			InstIter = CurrBlock->GetFirstInst();
			STARS_sval_t PrevIncomingDelta = (*InstIter)->GetStackPtrOffset();
			if (IncomingDelta == PrevIncomingDelta) {
				// No error, already processed.
				WorkList.pop_front(); // discard already processed block.
			}
#if 1
			else if (this->CallsAlloca || this->HasPushAfterFrameAlloc()) 
#else
			else 
#endif
			{
				ConflictingValuesSeen = true;
				// Calls to alloca() become additional stack allocations, which can produce
				//  multiple possible stack deltas for an instruction if different paths
				//  to the instruction do not hit the same alloca() calls, so it is not
				//  an error to have conflicting deltas in the functions that call alloca().
				//  We want to converge to the smallest magnitude deltas, which are the greatest
				//  values because the deltas are negative. This is the opposite of IDA Pro, which
				//  seems to use the largest stack deltas it has seen.
				if (PrevIncomingDelta >= IncomingDelta) {
					// Old incoming delta should be retained.
					WorkList.pop_front(); // discard already processed block.
				}
				else {
					CurrBlock->SetProcessed(false);
					ReprocessingAllocaBlocks = true;
					DeltaIncrement = IncomingDelta - PrevIncomingDelta;
					if (0 != (DeltaIncrement % 32)) { // 32 bytes is our alloca fudge increment
						SMP_msg("WARNING: DeltaIncrement of %d not alloca multiple for block at %llx\n",
							(unsigned long long) CurrBlock->GetFirstAddr());
					}
					continue;  // Make the loop come around and process this block again, using
							   //  the new incoming delta. Because we do this only when it decreases
							   //  the stack size as seen by this block, no infinite loop is possible.
				}
			}
#if 1
			else {
				this->AnalyzedSP = false;
				SMP_msg("ERROR: Stack delta: PrevIncoming is %ld NewIncoming is %ld at %llx\n",
					(long) PrevIncomingDelta, (long) IncomingDelta, (unsigned long long) (*InstIter)->GetAddr());
				WorkList.clear();
			}
#endif
		}
		else { // not already processed
			// ReprocessingAllocaBlocks => Reaching definitions sets have already been computed; just need to do stack delta analysis
			ReachesOutChanged = false;
#if 0
			ReachesInChanged = CurrBlock->ComputeReachesInSet();
			if (ReachesInChanged && ReprocessingAllocaBlocks) {
				// Because block is not truly being processed for the first time, the ReachesOut set can be
				//  recomputed without processing instructions, as the DEDefs set and VarKill set will never
				//  change after the first pass through the block.
				ReachesOutChanged = CurrBlock->ComputeReachesOutSet();
			}

			if (CurrBlock->IsReachesOutStale()) {
				ReachesOutChanged = CurrBlock->ComputeReachesOutSet();
			}
#endif
			CurrBlock->SetProcessed(true);
			WorkList.pop_front();
			for (InstIter = CurrBlock->GetFirstInst(); InstIter != CurrBlock->GetLastInst(); ++InstIter) {
				CurrInst = (*InstIter);
				if (CurrInst->IsMarkerInst()) {
					continue; // skip marker instruction
				}
				STARS_ea_t InstAddr = CurrInst->GetAddr();
				if (InstAddr == this->GetFirstFrameAllocInstAddr()) {
					// Record the reset point for frame deallocations
					this->PreAllocStackDelta = IncomingDelta;
				}

				CurrInst->SetStackPtrOffset(IncomingDelta);

				// Search for tail calls, defined strictly as having an incoming stack delta of zero and
				//  being jumps to far chunks not included in the current function.
				if (0 == IncomingDelta) {
					if (CurrInst->IsBranchToOtherFunc()) {
						CurrInst->SetTailCall();
#if 0
						SMP_msg("INFO: Found tail call at %llx from %s: %s\n", (unsigned long long) InstAddr, this->GetFuncName(),
								CurrInst->GetDisasm());
#endif
					}
					else if (INDIR_JUMP == CurrInst->GetDataFlowType()) {
						// We have an indirect jump with an incoming delta of zero. This
						//  is probably an indirect tail call, unless the target address has
						//  been resolved to addresses in the current function (e.g. switch table).
						if (this->HasUnresolvedIndirectJumps() && (STARS_BADADDR == CurrInst->GetCallTarget())) {
							CurrInst->SetTailCall();
#if 0
							SMP_msg("INFO: Found indirect tail call at %llx from %s: %s\n", (unsigned long long) InstAddr, this->GetFuncName(),
									CurrInst->GetDisasm());
#endif
						}
					}
				}

#if SMP_COMPARE_IDA_STARS_STACK_POINTER_DELTAS
				if (DebugFlag && IDAProSucceeded && !(this->CallsAlloca || this->HasPushAfterFrameAlloc())) {
					STARS_sval_t IDAProDelta = SMP_get_spd(this->GetFuncInfo(), InstAddr);
					if ((IDAProDelta != IncomingDelta) && (!CurrInst->MDIsHaltInstr())) {
						// IDA Pro special-cases the HALT instruction to make it appear that the
						//  incoming stack delta is zero. We do no such special case delta adjudstment,
						//  so we suppress error messages, as our delta will be non-zero.
						SMP_msg("ERROR: At %llx IDA Pro has stack pointer delta of %lld and we compute %lld\n", 
							(unsigned long long) InstAddr, (long long) IDAProDelta, (long long) IncomingDelta);
					}
				}
				if (TraceFlag) {
					SMP_msg("INFO: Stack delta trace: %lld at %llx\n",
						(long long) IncomingDelta, (unsigned long long) InstAddr);
				}
#endif

				// As soon as the stack ptr offset has been set for the current instruction, we can normalize
				//  all of its stack DEFs and USEs.
				bool StackOpsChanged = CurrInst->MDNormalizeStackOps(this->UsesFramePointer(), this->GetFramePtrStackDelta(), ReprocessingAllocaBlocks, DeltaIncrement);

				// Dataflow equation for upward exposed variables: If a variable has not been
				//  killed yet in this block, starting from the top of the block, and it is used
				//  in the current instruction, then it is upwardly exposed.
				set<DefOrUse, LessDefUse>::iterator CurrUse;
				if (!ReprocessingAllocaBlocks) { // Only compute on first pass through block
					for (CurrUse = CurrInst->GetFirstUse(); CurrUse != CurrInst->GetLastUse(); ++CurrUse) {
						STARSOpndTypePtr UseOp = CurrUse->GetOp();
						// CanonicalizeOpnd(UseOp);
						if (MDIsDataFlowOpnd(UseOp, this->UsesFramePointer())) {
							// We have a register or stack operand. If stack operand, it is normalized, i.e. EBP-4 might be ESP-8,
							//  where the ESP-8 refers to the value of ESP upon entry to the function, not its current value.
							//  This normalization makes each stack location uniquely named (no aliases at different code locations due
							//  to different values of ESP at different code locations).
							// We only track certain kinds of operands in our data flow analyses.
							// Only add non-immediate operands that are not already killed in this block.
							//  o_near and o_far operands are code addresses in immediate form, e.g.
							//  call _printf might be call 0x8048040, with o_near = 0x8048040.
							if (!(CurrBlock->MDAlreadyKilled(UseOp))) {
								CurrBlock->AddUpExposed(UseOp);
							}
						}
					}
				}

				// Find stack pointer saves and restores.
				bool StackPtrSaved;
				STARS_sval_t SavedDelta;
				STARSOpndTypePtr CopyOperand = nullptr;
				bool SavedDeltaHasNewValue = false;
				bool ErrorFlag = false;
				if (CurrInst->MDIsStackPtrSaveOrRestore(this->UsesFramePointer(), this->GetFramePtrStackDelta(), StackPtrSaved, SavedDelta, CopyOperand, ErrorFlag)) {
					// NOTE: If CopyOperand is a stack location, it is normalized.
					if (StackPtrSaved) {
						// Insert new entry into the StackPtrCopySet. For the ReprocessingAllocaBlocks case, this might be
						//  just a tricky update of the delta for an existing item in the set.
						bool DeltaInserted = this->AddToStackPtrCopySet(CopyOperand, InstAddr, SavedDelta);
						if (TraceFlag) {
							SMP_msg("INFO: Stack delta saved: %ld at %llx\n", (long) SavedDelta, (unsigned long long) InstAddr);
							PrintOperand(CopyOperand);
							SMP_msg("\n");
						}
					}
					else { // stack pointer was restored from saved value
						StackPointerRestoreSeen = true;
						SavedDeltaHasNewValue = true; // no need to compute effect of restore instruction later
						if (ReprocessingAllocaBlocks) {
							// Now that the stack pointer has been restored, the effect of the alloca() should
							//  be undone. We no longer need to adjust delta values for the rest of the block.
							DeltaIncrement = 0;
						}
					}
				} // end if (CurrInst->MDIsStackPtrSaveOrRestore())
				else if (ErrorFlag) {
					this->AnalyzedSP = false;
					WorkList.clear();
					SMP_msg("ERROR: ErrorFlag=true from MDIsStackPtrSaveOrRestore() at %llx\n",
						(unsigned long long) InstAddr);
					break;
				}
				else if (CurrInst->MDIsLeaveInstr()) {
					// LEAVE is a restoration of a stack pointer, not processed by CurrInst->MDIsStackPtrSaveOrRestore()
					StackPointerRestoreSeen = true; 
				}

				// Update VarKill and DownExposedDefs sets for DEFs in current instruction.
				// Dataflow equation for killed variables: If a variable is defined in any
				//  instruction in the block, it is killed by this block (i.e. prior definitions
				//  of that variable will not make it through the block).
				if (!ReprocessingAllocaBlocks) { // Only compute on first pass through block
					set<DefOrUse, LessDefUse>::iterator CurrDef;
					for (CurrDef = CurrInst->GetFirstDef(); CurrDef != CurrInst->GetLastDef(); ++CurrDef) {
						STARSOpndTypePtr DefOp = CurrDef->GetOp();
						if (MDIsDataFlowOpnd(DefOp, this->UsesFramePointer())) {
							// We have a register or stack operand. If stack operand, it is normalized, i.e. EBP-4 might be ESP-8,
							//  where the ESP-8 refers to the value of ESP upon entry to the function, not its current value.
							//  This normalization makes each stack location uniquely named (no aliases at different code locations due
							//  to different values of ESP at different code locations).
							CurrBlock->AddVarKill(DefOp);
							CurrBlock->UpdateDownExposedDefs(DefOp, InstAddr);
						}
					}
				}

				if (SavedDeltaHasNewValue) {
					IncomingDelta = SavedDelta; // from restore instruction
				}
				else {
					CurrentDelta = CurrInst->AnalyzeStackPointerDelta(IncomingDelta, this->GetFramePtrStackDelta());
					if (SMP_STACK_POINTER_BITWISE_AND_CODE == ((STARS_uval_t) CurrentDelta)) {
						// For now, we ignore instructions that AND a constant into the stack pointer.
						CurrentDelta = 0;
						SMP_msg("WARNING: Stack pointer bitwise AND ignored at %llx\n",
							(unsigned long long) CurrInst->GetAddr());
					}
					else if (SMP_STACK_DELTA_ERROR_CODE == ((STARS_uval_t) CurrentDelta)) {
						this->AnalyzedSP = false;
						SMP_msg("ERROR: Stack delta unanalyzeable at %llx\n", (unsigned long long) InstAddr);
						WorkList.clear();
						break;
					}
					if (TraceFlag) {
						SMP_msg("TRACE: CurrentDelta is %lld at %llx\n", (long long) CurrentDelta, (unsigned long long) InstAddr);
					}
					SMPitype FlowType = CurrInst->GetDataFlowType();
					IncomingDelta += CurrentDelta;
					if ((RETURN == FlowType) && (!CurrInst->IsCondTailCall()) && (!CurrInst->IsTailCall())) {
						// We hope to see a consistent outgoing delta from all RETURN points.
						//  We special-case the conditional jump used as tail call, because it must be followed
						//  by a real return instruction later. If the jump is taken, it acts as a return, but
						//  it has not yet popped the stack.
						// Also, a regular tail call always has the stack delta at zero and does not match
						//  the stack delta of actual return instructions elsewhere in the function.
						if (ReturnSeen) { // This is not the first RETURN seen.
							if (IncomingDelta != this->NetStackDelta) { // Inconsistent
								SMP_msg("ERROR: Inconsistent stack deltas at return instruction at %p : Previous: %lld Current: %lld\n",
									CurrInst->GetAddr(), (long long) this->NetStackDelta, (long long) IncomingDelta);
								ConsistentNetDelta = false;
								this->AnalyzedSP = false;
								WorkList.clear();
								break;
							}
						}
						else { // First RETURN statement seen.
							ReturnSeen = true;
							this->NetStackDelta = IncomingDelta;
#if SMP_AUDIT_STACK_POINTER_DELTAS
							if (CALLING_CONVENTION_DEFAULT_FUNCTION_STACK_DELTA != IncomingDelta) {
								SMP_msg("WARNING: Stack delta not %d after return instruction at %llx\n", 
									CALLING_CONVENTION_DEFAULT_FUNCTION_STACK_DELTA, (unsigned long long) CurrInst->GetAddr());
							}
#endif
						}
						// If we permitted inconsistent stack deltas previously, then the stack pointer has to
						//  have been restored, e.g. if we allocate a frame with sub esp,32 and then we later
						//  have paths that pass through an alloca() call, a push, etc., then the alloca() or
						//  push will not be undone by add esp,32. It must be undone by something like mov esp,ebp.
						if (ConflictingValuesSeen && !StackPointerRestoreSeen) {
							SMP_msg("ERROR: Inconsistent stack deltas seen, no stack pointer restore before return instruction at %llx\n",
								(unsigned long long) CurrInst->GetAddr());
							this->AnalyzedSP = false;
							WorkList.clear();
							break;
						}
					}
				} // end if (SavedDeltaHasNewValue) ... else ...
			} // end for each instruction in WorkList block
			if (CurrBlock->IsReachesOutStale()) {
				ReachesOutChanged = CurrBlock->ComputeReachesOutSet();
			}
			// Push the successor blocks onto the work list if anything changed
			if (this->AnalyzedSP) { // if we do not have an error already
				CurrBlock->SetOutgoingStackDelta(IncomingDelta); // record incoming delta for all successors
				if (ReachesOutChanged || (!ReprocessingAllocaBlocks)) { // if anything changed (deltas or reaching defs ReachOut set)
					list<SMPBasicBlock *>::iterator SuccIter;
					if (DebugFlag && (0 == IncomingDelta)) {
						SMP_msg("ERROR: Pushing WorkList items with IncomingDelta of zero. Dumping Block:\n");
						CurrBlock->Dump();
					}
					for (SuccIter = CurrBlock->GetFirstSucc(); SuccIter != CurrBlock->GetLastSucc(); ++SuccIter) {
						pair<SMPBasicBlock *, STARS_sval_t> SuccPair (*SuccIter, IncomingDelta);
						WorkList.push_back(SuccPair);
					}
				}
			}
		} // end if block already processed ... else ...
		ReprocessingAllocaBlocks = false; // reset to default before processing next worklist element
	} while (!WorkList.empty());

	this->STARSStackPtrAnalysisPerformed = true;
	if (this->AnalyzedSP) {
		if (CALLING_CONVENTION_DEFAULT_FUNCTION_STACK_DELTA != ((size_t) this->NetStackDelta)) {
			SMP_msg("WARNING: Non-default stack ptr delta %ld for function: %s\n", (long) this->NetStackDelta, this->GetFuncName());
		}
		if (this->StackAdjustmentComputed 
			&& (this->GlobalStackAdjustment != (((STARS_sval_t) CALLING_CONVENTION_DEFAULT_FUNCTION_STACK_DELTA) - this->NetStackDelta))) {
			// Oops. When program graph cycles caused us to try to compute the GlobalStackAdjustment as our best guess
			//  for this function's effect on the stack delta, we told our callers that these three values would cancel out.
			//  They do not. Our callers have now been using a bad stack delta for their call instructions. Too late for
			//  anything but a diagnostic message.
				SMP_msg("ERROR: Earlier GlobalStackAdjustment computation %ld does not agree with current NetStackDelta result for function: %s\n",
					(long) this->GlobalStackAdjustment, this->GetFuncName());
		}
	}


	if (IDAProSucceeded) {
		if (!this->AnalyzedSP) {
			SMP_msg("ERROR: Stack Ptr Delta Analysis succeeded in IDA, failed in STARS for %llx : %s\n", 
				(unsigned long long) this->GetFirstFuncAddr(), this->GetFuncName());
		}
	}
	else {
		if (this->AnalyzedSP) {
			SMP_msg("SUCCESS: Stack Ptr Delta Analysis failed in IDA, succeeded in STARS for %llx : %s\n",
				(unsigned long long) this->GetFirstFuncAddr(), this->GetFuncName());
		}
	}
	if (!this->AnalyzedSP) {
		;
		// (void) this->UseIDAStackPointerDeltas();
	}
	else {
		// Success, so try to find saved/restored register pairs so that we do not
		//  conservatively say that the function kills registers that it in fact preserves.
		this->DetectThunkFunction();
		this->FindPreservedRegs();
		this->ComputeGlobalSets();
	}

	// Cannot keep the reaching defs around on huge benchmarks, or we run out of memory.
	//  Once we have SSA form, we can obtain reaching defs info on the fly if we want it.
	list<SMPBasicBlock *>::iterator BlockIter;
	for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
		(*BlockIter)->FreeReachingDefsMemory();
	}

	return this->AnalyzedSP;
} // end of SMPFunction::AnalyzeStackPointerDeltas()

// Insert the arguments into the StackPtrCopySet; or, if a matching entry already exists
//  with a StackDelta of greater magnitude than StackDelta, update just the StackDelta.
// Return true if StackDelta was inserted, false if it was used to update an old entry.
bool SMPFunction::AddToStackPtrCopySet(STARSOpndTypePtr CopyOp, STARS_ea_t InstAddr, STARS_sval_t StackDelta) {
	// Build the triple <operand, <address, delta>> that the set stores.
	pair<STARS_ea_t, STARS_sval_t> InsertStackDefn(InstAddr, StackDelta);
	pair<STARSOpndTypePtr, pair<STARS_ea_t, STARS_sval_t> > InsertStackDefnOp(CopyOp, InsertStackDefn);

	// A single insert() both searches and inserts, avoiding the former
	//  find()-then-insert() double lookup. InsertResult.second is true iff
	//  no equivalent entry (per LessStackDeltaCopy) was already present.
	pair<set<pair<STARSOpndTypePtr, pair<STARS_ea_t, STARS_sval_t> >, LessStackDeltaCopy>::iterator, bool> InsertResult
		= this->StackPtrCopySet.insert(InsertStackDefnOp);
	bool NewInsertion = InsertResult.second;
	if (!NewInsertion) {
		// Already there; see if delta needs to be updated.
		// Favor a smaller stack frame for the alloca-calling functions, e.g. favor -24 over -32 as a delta.
		if (StackDelta > InsertResult.first->second.second) {
			// Replace the old entry with a new one carrying the new delta.
			this->StackPtrCopySet.erase(InsertResult.first);
			InsertResult = this->StackPtrCopySet.insert(InsertStackDefnOp);
			assert(InsertResult.second);
		}
	}

	return NewInsertion;
} // end of SMPFunction::AddToStackPtrCopySet()

// Determined which regs are not killed by func or its callees; set bits in PreservedRegsBitmap
// Strategy: gather the registers pushed in the entry block and the registers popped in
//  return blocks, then match them up by register number and relative stack offset.
//  A register pushed and popped at the same offset is considered preserved.
//  Any inconsistency (e.g. differing pop patterns across multiple return blocks)
//  aborts the analysis and leaves the bitmap unchanged.
void SMPFunction::FindPreservedRegs(void) {
	// First, find candidates from the pushes in the entry block.
	bool EntryBlockProcessed = false; // set after the first block's pushes are collected
	bool ReturnBlockSeen = false;     // true once at least one return block has been analyzed
	SMPBasicBlock *CurrBlock;
	// Key: register number; value: relative stack offset of its save/restore slot.
	map<uint32_t, STARS_sval_t> PushedRegsList, PoppedRegsList;
	list<SMPBasicBlock *>::iterator BlockIter;
	vector<SMPInstr *>::iterator InstIter;
	bool success = true; // any failure below short-circuits the outer loop
	for (BlockIter = this->Blocks.begin(); success && (BlockIter != this->Blocks.end()); ++BlockIter) {
		CurrBlock = (*BlockIter);
		if (!EntryBlockProcessed) { // Must be first block
			EntryBlockProcessed = true;
			for (InstIter = CurrBlock->GetFirstInst(); InstIter != CurrBlock->GetLastInst(); ++InstIter) {
				SMPInstr *CurrInst = (*InstIter);
				if (CurrInst->MDIsPushInstr() || CurrInst->MDIsEnterInstr()) {
					// Get list of regs saved, and their relative stack offsets, from the RTLs.
					//  Could be a PushAllRegs instruction, so there might be more than one.
					success = CurrInst->GetPushedRegsList(PushedRegsList);
					if (!success)
						break;
				}
			}
		}
		// NOTE: EntryBlockProcessed is always true here (set above on the first
		//  iteration), so this effectively tests only CurrBlock->HasReturn();
		//  the entry block itself is also examined if it contains a return.
		if (EntryBlockProcessed && CurrBlock->HasReturn()) {
			// See if we have pops that match the pushes.
			size_t MatchCount = 0; // successful pop-list extractions in this return block
			for (InstIter = CurrBlock->GetFirstInst(); InstIter != CurrBlock->GetLastInst(); ++InstIter) {
				SMPInstr *CurrInst = (*InstIter);
				if (CurrInst->MDIsPopInstr() || CurrInst->MDIsLeaveInstr()) {
					// Get list of regs restored, and their relative stack offsets, from the RTLs.
					//  Could be a PopAllRegs instruction, so there might be more than one.
					// First return block (!ReturnBlockSeen) populates PoppedRegsList;
					//  later return blocks are checked against it for consistency.
					success = CurrInst->GetPoppedRegsList(!ReturnBlockSeen, PoppedRegsList);
					if (success)
						++MatchCount;
					else
						break;
				}
			}
			if (ReturnBlockSeen) { // this is not the first return block
				if (MatchCount != PoppedRegsList.size()) {
					// Did not have same number of successes as prior return blocks. Inconsistent.
					success = false;
					break;
				}
			}
			ReturnBlockSeen = true;
		}
	} // end for all blocks

	if (success) {
		// Try to match elements from the two lists.
		// Both maps iterate in ascending register-number order, so this is a
		//  standard sorted-sequence intersection: advance whichever side has
		//  the smaller key, and on equal keys compare the stack offsets.
		map<uint32_t, STARS_sval_t>::iterator PushIter, PopIter;
		while (!PushedRegsList.empty() && (!PoppedRegsList.empty())) {
			PushIter = PushedRegsList.begin();
			PopIter = PoppedRegsList.begin();
			uint32_t PushedReg = PushIter->first;
			uint32_t PoppedReg = PopIter->first;
			if (PushedReg == PoppedReg) {
				if (PushIter->second == PopIter->second) { // match
					// Saved and restored at the same offset => preserved.
					this->PreservedRegsBitmap.set(PushedReg);
				}
				PushedRegsList.erase(PushIter);
				PoppedRegsList.erase(PopIter);
			}
			else if (PushedReg < PoppedReg) {
				PushedRegsList.erase(PushIter);
			}
			else { // must be PushedReg > PoppedReg
				PoppedRegsList.erase(PopIter);
			}
		}
	}

	PushedRegsList.clear();
	PoppedRegsList.clear();
	return;
} // end of SMPFunction::FindPreservedRegs()

// Find failure to preserve callee-saved regs
// Emits a warning for every callee-saved register that this function kills
//  without a matching save/restore pair (per the PreservedRegsBitmap).
void SMPFunction::AuditCallingConvention(void) {
	// Startup code, main(), and thunk functions are exempt from the
	//  callee-saved register preservation requirement.
	string FuncName(this->GetFuncName());
	bool ExemptFunc = IsStartupFuncName(FuncName) || (0 == strcmp("main", FuncName.c_str())) || this->IsThunkFunction();
	if (ExemptFunc)
		return;

	// Scan every operand killed by this function; only register kills matter here.
	for (STARSOpndSetIter KillIter = this->GetFirstVarKill(); KillIter != this->GetLastVarKill(); ++KillIter) {
		STARSOpndTypePtr KillOp = (*KillIter);
		if (!KillOp->IsRegOp())
			continue;
		STARS_regnum_t RegNum = KillOp->GetReg();
		// Killed and not saved+restored
		if ((!this->IsRegPreserved(RegNum)) && global_STARS_program->IsCalleeSavedReg(RegNum)) {
			SMP_msg("WARNING: Callee-saved reg %u killed and not preserved in func at %llx\n",
				RegNum, (unsigned long long) this->GetFirstFuncAddr());
		}
	}
	return;
} // SMPFunction::AuditCallingConvention()

// Scan all instructions to locate the frame deallocation instruction, any
//  alloca()-style allocations occurring after the main frame allocation, and
//  pushes that follow the frame allocation.
// Side effects: sets this->LocalVarsDeallocInstr, this->CallsAlloca, and
//  this->PushAfterLocalVarAlloc; marks alloca call sites via SetAllocaCall().
void SMPFunction::FindAllAllocsAndDeallocs(void) {
	bool FoundAllocInstr = false;   // have we reached this->LocalVarsAllocInstr yet?
	bool FoundDeallocInstr = false; // have we seen a candidate dealloc instruction yet?
	bool FoundAlloca = false;       // alloca()-style alloc seen after the frame alloc?
	// NOTE: Removed an unused DebugFlag local (it was assigned but never read).

	list<SMPInstr *>::iterator InstIter = this->Instrs.begin();
#if SMP_USE_SSA_FNOP_MARKER
	++InstIter;  // skip marker instruction
#endif
	for (; InstIter != this->Instrs.end(); ++InstIter) {
		SMPInstr *CurrInst = (*InstIter);
		STARS_ea_t addr = CurrInst->GetAddr();

		// Keep the most recent instruction in the DeallocInstr
		//  in case we reach the return without seeing a dealloc.
		if (!FoundDeallocInstr) {
			this->LocalVarsDeallocInstr = addr;
		}

		if (addr == this->LocalVarsAllocInstr) {
			FoundAllocInstr = true;
		}
		else if (FoundAllocInstr) {
			// We can now start searching for the DeallocInstr.
			if (CurrInst->MDIsFrameDeallocInstr(UseFP, this->GetLocalVarsSize())) {
				// Keep saving the most recent addr that looks
				//  like the DeallocInstr until we reach the
				//  end of the function. Last one to look like
				//  it is used as the DeallocInstr.
#if SMP_DEBUG_CONTROLFLOW
				SMP_msg("Returned from MDIsFrameDeallocInstr()\n");
#endif
				this->LocalVarsDeallocInstr = addr;
				FoundDeallocInstr = true;
			}
			else if (CurrInst->MDIsFrameAllocInstr()) {
				// A second allocation after the main frame alloc indicates alloca()-like behavior.
				FoundAlloca = true;
				if (CurrInst->HasAllocaRTL()) {
					CurrInst->SetAllocaCall();
				}
			}
			else if (CurrInst->MDIsPushInstr()) {
				this->PushAfterLocalVarAlloc = true;
			}
		}
		else if (CurrInst->HasAllocaRTL()) {
			CurrInst->SetAllocaCall();
			// Don't set this->CallsAlloca() if the alloca() is before the frame alloc
		}
	}
	this->CallsAlloca = FoundAlloca;

	return;
} // end of SMPFunction::FindAllAllocsAndDeallocs()

// Compute FramePointerStackDelta as soon as possible so that it is available for SyncAllRTs().
void SMPFunction::FindFramePointerDelta(void) {
	bool FirstBlockProcessed = false;
	bool FPSaved = false;  // have seen push of frame pointer reg
	bool SPintoFP = false; // have seen copy of stack pointer into frame pointer
	STARS_sval_t IncomingDelta = 0;
	STARS_sval_t CurrentDelta;
	list<SMPInstr *>::iterator InstIter = this->Instrs.begin();
#if SMP_USE_SSA_FNOP_MARKER
	++InstIter;  // skip marker instruction
#endif
	while (!FirstBlockProcessed && (InstIter != this->Instrs.end())) {
		SMPInstr *CurrInst = (*InstIter);
		// Accumulate stack delta values.
		CurrentDelta = CurrInst->AnalyzeStackPointerDelta(IncomingDelta, this->GetFramePtrStackDelta());
		if (SMP_STACK_POINTER_BITWISE_AND_CODE == ((STARS_uval_t) CurrentDelta)) {
			// For now, we ignore instructions that AND a constant into the stack pointer.
			CurrentDelta = 0;
		}
		else if (SMP_STACK_DELTA_ERROR_CODE == ((STARS_uval_t) CurrentDelta)) {
			this->AnalyzedSP = false;
			break; // error exit
		}
		// Look for initialization of frame pointer, record its stack delta
		FirstBlockProcessed = CurrInst->IsLastInBlock();
		if (!FPSaved) { // still looking for "push <framepointerreg>"
			if (CurrInst->MDIsPushInstr() && CurrInst->GetOperand(0)->MatchesReg(MD_FRAME_POINTER_REG)) {
				FPSaved = true;
			}
		}
		else if (!SPintoFP) { // found "push <framepointerreg>", looking for "fp := sp"
			if ((CurrInst->GetIDAOpcode() == MD_MOVE_INSTRUCTION) && CurrInst->IsAnalyzeable()
				&& (CurrInst->GetFirstDef()->GetOp()->MatchesReg(MD_FRAME_POINTER_REG))
				&& (CurrInst->GetFirstUse()->GetOp()->MatchesReg(MD_STACK_POINTER_REG))) {
				SPintoFP = true;
				this->FramePointerStackDelta = IncomingDelta;
				FirstBlockProcessed = true; // stop looking
				assert(this->UsesFramePointer());
			}
		}
		IncomingDelta += CurrentDelta;
		++InstIter;
	}

	return;
} // end of SMPFunction::FindFramePointerDelta()

// Figure out the different regions of the stack frame by building the local
//  variable table, i.e. the boundaries between stack frame objects such as
//  callee-saved registers, local variables, and the return address.
// The stack frame info is later used to emit stack annotations at the
//  instruction that allocates space for local vars (e.g. sub esp,48); for a
//  function with no locals, the end of the prologue serves that role, and the
//  matching deallocation point in the epilogue is handled similarly.
// Also gathers callee-saved register information when the analyses it depends
//  on are available.
void SMPFunction::SetStackFrameInfo(void) {

	// Now, find the boundaries between stack frame objects.
	this->BuildLocalVarTable();

	// Get callee-saved regs info for remediation use, but only when a frame
	//  allocation instruction was identified and stack pointer analysis succeeded.
	bool HaveFrameAllocInstr = (STARS_BADADDR != this->GetFirstFrameAllocInstAddr());
	if (HaveFrameAllocInstr && this->StackPtrAnalysisSucceeded()) {
		this->MDFindSavedRegs();
	}

	return;
} // end of SMPFunction::SetStackFrameInfo()

// IDA Pro defines the sizes of regions in the stack frame in a way
//  that suits its purposes but not ours. The frsize field of the func_info_t
//  structure measures the distance between the stack pointer and the
//  frame pointer (ESP and EBP in the x86). This region includes some
//  of the callee-saved registers. So, the frregs field only includes
//  the callee-saved registers that are above the frame pointer.
//  x86 standard prologue on gcc/linux:
//    push ebp      ; save old frame pointer
//    mov ebp,esp   ; new frame pointer = current stack pointer
//    push esi      ; callee save reg
//    push edi      ; callee save reg
//    sub esp,34h   ; allocate 52 bytes for local variables
//
//  Notice that EBP acquires its final frame pointer value AFTER the
//  old EBP has been pushed. This means that, of the three callee saved
//  registers, one is above where EBP points and two are below.
//  IDA Pro is concerned with generating readable addressing expressions
//  for items on the stack. None of the callee-saved regs will ever
//  be addressed in the function; they will be dormant until they are popped
//  off the stack in the function epilogue. In order to create readable
//  disassembled code, IDA defines named constant offsets for locals. These
//  offsets are negative values (x86 stack grows downward from EBP toward
//  ESP). When ESP_relative addressing occurs, IDA converts a statement:
//    mov eax,[esp+12]
//  into the statement:
//    mov eax,[esp+3Ch+var_30]
//  Here, 3Ch == 60 decimal is the distance between ESP and EBP, and
//  var_30 is defined to have the value -30h == -48 decimal. So, the
//  "frame size" in IDA Pro is 60 bytes, and a certain local can be
//  addressed in ESP-relative manner as shown, or as [ebp+var_30] for
//  EBP-relative addressing. The interactive IDA user can then edit
//  the name var_30 to something mnemonic, such as "virus_size", and IDA
//  will replace all occurrences with the new name, so that code references
//  automatically become [ebp+virus_size]. As the user proceeds
//  interactively, he eventually produces very understandable code.
// This all makes sense for producing readable assembly text. However,
//  our analyses have a compiler perspective as well as a memory access
//  defense perspective. SMP distinguishes between callee saved regs,
//  which should not be overwritten in the function body, and local
//  variables, which can be written. We view the stack frame in logical
//  pieces: here are the saved regs, here are the locals, here is the
//  return address, etc. We don't care which direction from EBP the
//  callee-saved registers lie; we don't want to lump them in with the
//  local variables. We also don't like the fact that IDA Pro will take
//  the function prologue code shown above and declare frregs=4 and
//  frsize=60, because frsize no longer matches the stack allocation
//  statement sub esp,34h == sub esp,52. We prefer frsize=52 and frregs=12.
// So, the task of this function is to fix these stack sizes in our
//  private data members for the function, while leaving the IDA database
//  alone because IDA needs to maintain its own definitions of these
//  variables.
// Fixing means we will update the data members LocalVarsSize and
//  CalleeSavedRegsSize.
// NOTE: This function is both machine dependent and platform dependent.
//  The prologue and epilogue code generated by gcc-linux is as discussed
//  above, while on Visual Studio and other Windows x86 compilers, the
//  saving of registers other than EBP happens AFTER local stack allocation.
//  A Windows version of the function would expect to see the pushing
//  of ESI and EDI AFTER the sub esp,34h statement.

// How many irrelevant insts in the first basic block should we see before deciding that frame allocation is finished?
#define STARS_POST_CALL_NONALLOC_INSTR_LIMIT 10  // after function call seen
#define STARS_NONALLOC_INSTR_LIMIT 30            // after this many irrelevant insts even if no calls

// Scan the first basic block of the function to characterize the stack frame
//  setup: callee-saved register pushes, the local-variable allocation
//  instruction (e.g. sub esp,N), frame pointer establishment
//  (push ebp; mov ebp,esp), and stack adjustments that follow calls.
// Sets LocalVarsSize, CalleeSavedRegsSize, LocalVarsAllocInstr and UseFP,
//  and computes the frame pointer delta when a frame pointer is in use.
// Always returns true.
bool SMPFunction::MDFixFrameInfo(void) {
	int SavedRegsSize = 0;
	int OtherPushesSize = 0;  // besides callee-saved regs
	int OtherAllocsSize = 0;  // additions to stack pointer, other odd instructions in prologue
	long AllocValue = 0;
	bool EBPSaved = false;  // detected push of frame pointer reg
	bool ESPintoEBP = false;  // detected initialization of frame pointer reg with stack pointer reg value
	bool FoundAllocInstr = false; // found simple alloc, e.g. subtraction from stack pointer
	bool DebugFlag = (0 == strcmp("__libc_csu_init", this->GetFuncName()));
	bool CallSeen = false;
	uint16_t PostCallNonAllocInstructions = 0;
	uint16_t NonAllocatingInstructions = 0;
	// NOTE(review): CallAddr is recorded below but never read again in this
	//  method — confirm whether it can be removed.
	STARS_ea_t CallAddr = STARS_BADADDR;
	STARS_ea_t SaveAddr = this->GetStartAddr(); // keeps most recent best value for stack frame allocation instr
	list<pair<STARS_ea_t, int> > AllocPointsList; // list of pair<InstAddr, bytes allocated on stack at that addr>

	// Iterate through the first basic block in the function. If we find
	//  a frame allocating Instr in it, then we have local vars. If not,
	//  we don't, and LocalVarsSize should have been zero. Count the callee
	//  register saves leading up to the local allocation. Set data members
	//  according to what we found if the values of the data members would
	//  change.
	SMPBasicBlock *CurrBlock = this->Blocks.front();
	vector<SMPInstr *>::iterator CurrIter = CurrBlock->GetFirstInst();
#if SMP_USE_SSA_FNOP_MARKER
	++CurrIter;  // skip marker instruction
#endif
	for ( ; CurrIter != CurrBlock->GetLastInst(); ++CurrIter) {
		SMPInstr *CurrInstr = (*CurrIter);
		STARS_ea_t InstAddr = CurrInstr->GetAddr();
		if (!CurrInstr->IsAnalyzeable())
			continue;

		if (FoundAllocInstr && (DEFAULT != CurrInstr->GetDataFlowType())) {
			// After we find the frame allocation instruction, a call or any other
			//  control flow instruction should terminate our processing. A stack
			//  frame adjustment could follow a call, which could falsely look like another
			//  frame allocation instruction.
			break;
		}
		if (CALL == CurrInstr->GetDataFlowType()) {
			CallSeen = true;
			CallAddr = InstAddr;
		}
		if (CurrInstr->MDIsPushInstr()) {
			SaveAddr = InstAddr;
			// We will make the gcc-linux assumption that a PUSH in
			//  the first basic block, prior to the stack allocating
			//  instruction, is a callee register save. To make this
			//  more robust, we ensure that the register is from
			//  the callee saved group of registers, and that it has
			//  not been defined thus far in the function (else it might
			//  be a push of an outgoing argument to a call that happens
			//  in the first block when there are no locals). **!!!!**
			size_t PushWidth = CurrInstr->GetPushedOpndByteSize();
			pair<STARS_ea_t, int> AllocPair(InstAddr, (int) PushWidth);
			AllocPointsList.push_back(AllocPair);
			if (CurrInstr->MDUsesCalleeSavedReg() && !CurrInstr->HasSourceMemoryOperand()) {
				SavedRegsSize += PushWidth;
				if (DebugFlag) SMP_msg("libc_csu_init SavedRegsSize: %d  %s\n", SavedRegsSize,
					CurrInstr->GetDisasm());
			}
			else {
				// Pushes of outgoing args can be scheduled so that
				//  they are mixed with the pushes of callee saved regs.
				OtherPushesSize += PushWidth;
				if (DebugFlag) SMP_msg("libc_csu_init OtherPushesSize: %d  %s\n", OtherPushesSize,
					CurrInstr->GetDisasm());
			}
			if (!EBPSaved) { // still looking for "push ebp"
				if (CurrInstr->GetOperand(0)->MatchesReg(MD_FRAME_POINTER_REG)) {
					EBPSaved = true;
				}
			}
		}
		else if (CurrInstr->MDIsFrameAllocInstr() && (!CurrInstr->HasAllocaRTL())) {
			if (DebugFlag) SMP_msg("libc_csu_init allocinstr: %s\n", CurrInstr->GetDisasm());
			// Get the size being allocated.
			set<DefOrUse, LessDefUse>::iterator CurrUse;
			for (CurrUse = CurrInstr->GetFirstUse(); CurrUse != CurrInstr->GetLastUse(); ++CurrUse) {
				// Find the immediate operand.
				if (CurrUse->GetOp()->IsImmedOp()) {
					// Get its value into LocalVarsSize.
					AllocValue = (signed long) CurrUse->GetOp()->GetImmedValue();
					// One compiler might have sub esp,24 and another
					//  might have add esp,-24. Take the absolute value.
					if (0 > AllocValue)
						AllocValue = -AllocValue;
					SaveAddr = InstAddr;  // ImmedOp, so now we know this is an alloc and not an alloca() call
					FoundAllocInstr = true;
					pair<STARS_ea_t, int> AllocPair(InstAddr, (int) AllocValue);
					AllocPointsList.push_back(AllocPair);
					break; // only want to process the immediate operand
				} // end if (o_imm == ...)
			} // end for all uses
		} // end if (push) .. elsif frame allocating instr
		else if (CurrInstr->HasStackPointerDEF() && (DEFAULT == CurrInstr->GetDataFlowType())) {
			// Handle all other possible changes to stack pointer register from non-control-flow instructions.
			//  This includes the alloca() inlined call pattern (sub esp,reg) which is assigned a default size.
			// NOTE(review): the message below says "before finding a frame
			//  allocation" but the condition fires when LocalVarsAllocInstr
			//  has already been set to a valid address — confirm the intended
			//  sense of this check.
			if (CurrInstr->MDIsLeaveInstr() && (STARS_BADADDR != this->LocalVarsAllocInstr)) {
				SMP_msg("ERROR: MDFixFrameInfo() found LEAVE opcode at %llx before finding a frame allocation.\n",
					(unsigned long long) InstAddr);
				break;  // should not find a LEAVE opcode before the frame allocation
			}
			STARS_sval_t CurrentDelta = CurrInstr->AnalyzeStackPointerDelta(0, 0);
			if (SMP_STACK_POINTER_BITWISE_AND_CODE == ((STARS_uval_t) CurrentDelta)) {
				// For now, we ignore instructions that AND a constant into the stack pointer.
				SMP_msg("WARNING: Stack pointer bitwise AND ignored at %llx in MDFixFrameInfo\n",
					(unsigned long long) InstAddr);
			}
			else if (SMP_STACK_DELTA_ERROR_CODE == ((STARS_uval_t) CurrentDelta)) {
				SMP_msg("ERROR: Stack delta unanalyzeable at %llx in MDFixFrameInfo\n", (unsigned long long) InstAddr);
			}
			else if (0 < CurrentDelta) { // deallocating stack; might be stack adjustment after a call instruction
				if (CallSeen) { // match the stack adjustment to the allocations before the call and remove them
					// Walk backwards through the allocation records, accumulating
					//  bytes until we have matched the post-call adjustment; the
					//  matched records were outgoing-arg pushes, not frame allocs.
					STARS_sval_t AllocBytesFound = 0;
					list<pair<STARS_ea_t, int> >::reverse_iterator ListRevIter = AllocPointsList.rbegin();
					while (ListRevIter != AllocPointsList.rend()) {
						STARS_ea_t AllocAddr = (*ListRevIter).first;
						int CurrAllocBytes = (*ListRevIter).second;
						AllocBytesFound += (STARS_sval_t) CurrAllocBytes;
						if (AllocBytesFound >= CurrentDelta) { // found enough bytes to match call adjustment
							++ListRevIter;
							if (ListRevIter != AllocPointsList.rend()) {
								SaveAddr = (*ListRevIter).first;
								// NOTE(review): the whole adjustment is subtracted from
								//  OtherPushesSize even if some of the matched pushes were
								//  counted in SavedRegsSize — confirm this cannot go negative.
								OtherPushesSize -= CurrentDelta; // Adjust allocated bytes to remove arg pushes before call inst
								SMP_msg("INFO: Removed arg pushes from stack allocs in %s\n", this->GetFuncName());
							}
							else { // reached beginning of function; no allocs, only arg pushes
								AllocValue = OtherAllocsSize = 0;
								SavedRegsSize = OtherPushesSize = 0;
								SMP_msg("INFO: No stack allocs other than arg pushes in %s\n", this->GetFuncName());
							}
							if (AllocBytesFound > CurrentDelta)
								SMP_msg("ERROR: Arg pushes did not match stack adjustment in %s\n", this->GetFuncName());
							break; // exit while loop
						}
						++ListRevIter;
					}
					if (AllocBytesFound < CurrentDelta) {
						AllocValue = OtherAllocsSize = 0;
						SavedRegsSize = OtherPushesSize = 0;
						SMP_msg("ERROR: Not enough stack allocs to equal call adjustment in first block of %s\n", this->GetFuncName());
					}
				}
				else if (FoundAllocInstr) {
					SMP_msg("WARNING: Stack deallocation at %llx in first block with no call inst.\n", (unsigned long long) InstAddr);
				}
				else { // Bad; no allocs found, but dealloc found
					SMP_msg("ERROR: Stack deallocation at %llx in first block with no alloc instr or call found.\n",
						(unsigned long long) InstAddr);
				}
				break; // exit after stack deallocation of any kind
			}
			else { // success; normal allocation
				int AllocSize = (int) (0 - CurrentDelta);
				if (FoundAllocInstr)
					this->AllocSizeAfterFrameAlloc += AllocSize;
				else
					OtherAllocsSize += AllocSize;
				pair<STARS_ea_t, int> AllocPair(InstAddr, AllocSize);
				AllocPointsList.push_back(AllocPair);
				// NOTE(review): %d is used for CurrentDelta, which is a
				//  STARS_sval_t and may be wider than int — confirm the format
				//  specifier matches the type on 64-bit builds.
				SMP_msg("INFO: Miscellaneous stack pointer delta of %d found at %llx in MDFixFrameInfo\n", CurrentDelta, (unsigned long long) InstAddr);
			}
		}
		else if (EBPSaved && (!ESPintoEBP)) { // found "push ebp", looking for "mov ebp,esp"
			if ((CurrInstr->GetIDAOpcode() == STARS_NN_mov)
				&& (CurrInstr->GetFirstDef()->GetOp()->MatchesReg(MD_FRAME_POINTER_REG))
				&& (CurrInstr->GetFirstUse()->GetOp()->MatchesReg(MD_STACK_POINTER_REG))) {
				ESPintoEBP = true;
			}
		}
		else {
			// Irrelevant instruction for frame analysis; bail out of the scan
			//  once we exceed the configured limits, so that code deep in a
			//  large first block cannot be mistaken for prologue activity.
			++NonAllocatingInstructions;
			if (CallSeen) {
				++PostCallNonAllocInstructions;
				if (STARS_POST_CALL_NONALLOC_INSTR_LIMIT <= PostCallNonAllocInstructions) {
					SMP_msg("INFO: Stopping search of first block after call for frame allocations at %llx\n",
						(unsigned long long) InstAddr);
					break;
				}
			}
			if (STARS_NONALLOC_INSTR_LIMIT <= NonAllocatingInstructions) {
				SMP_msg("INFO: Stopping search of first block for frame allocations at %llx\n",
					(unsigned long long) InstAddr);
				break;
			}
		}
	} // end for all instructions in the first basic block

	this->LocalVarsSize = (STARS_asize_t) AllocValue + (STARS_asize_t) OtherAllocsSize;
	// NOTE(review): the cast binds only to SavedRegsSize; the int sum is then
	//  narrowed to uint16_t on assignment — confirm the narrowing is intended.
	this->CalleeSavedRegsSize = (uint16_t) SavedRegsSize + OtherPushesSize;
	this->LocalVarsAllocInstr = SaveAddr;

	// If we found ESPintoEBP, we also found EBPSaved first, and we need to set
	//  this->UseFP to true.
	this->UseFP = ESPintoEBP;
	if (ESPintoEBP) {
		this->FindFramePointerDelta();
	}

	return true;
} // end of SMPFunction::MDFixFrameInfo()


// IDA Pro is sometimes confused by a function that uses the frame pointer
//  register for other purposes. For the x86, a function that uses EBP
//  as a frame pointer would begin with: push ebp; mov ebp,esp to save
//  the old value of EBP and give it a new value as a frame pointer. The
//  allocation of local variable space would have to come AFTER the move
//  instruction. A function that begins: push ebp; push esi; sub esp,24
//  is obviously not using EBP as a frame pointer. IDA is apparently
//  confused by the push ebp instruction being the first instruction
//  in the function. We will reset UseFP to false in this case.
// The inverse problem happens with a function that begins with instructions
//  other than push ebp; mov ebp,esp; ... etc. but eventually has those
//  instructions in the first basic block. For example, a C compiler generates
//  for the first block of main():
//    lea ecx,[esp+arg0]
//    and esp, 0xfffffff0
//    push dword ptr [ecx-4]
//    push ebp
//    mov ebp,esp
//    push ecx
//    sub esp,<framesize>
//
//  This function is obviously using EBP as a frame pointer, but IDA Pro marks
//  the function as not using a frame pointer. We will reset UseFP to true in
//  this case.
// NOTE: This logic should work for both Linux and Windows x86 prologues.
// NOTE: As of July, 2015, this logic was incorporated into MDFixFrameInfo().
// Dead code: this entire method is compiled out (#if 0 ... #endif below) and
//  retained only for reference; its logic now lives in MDFixFrameInfo().
#if 0
bool SMPFunction::MDFixUseFP(void) {
	bool OldUseFP = this->UsesFramePointer();
	bool HasLocals = (0 < this->GetLocalVarsSize());
	list<SMPInstr *>::iterator InstIter = this->Instrs.begin();
	STARS_ea_t addr;

#if SMP_USE_SSA_FNOP_MARKER
	++InstIter;  // skip marker instruction
#endif
	SMPInstr *CurrInst;

#if 0
	if (!(this->UseFP)) {
#endif
		// See if we can detect the instruction "push ebp" followed by the instruction
		//  "mov ebp,esp" in the first basic block. The instructions do not have to be
		//  consecutive. If we find them, we will reset UseFP to true.
		bool FirstBlockProcessed = false;
		bool EBPSaved = false;
		bool ESPintoEBP = false;
		do {
			CurrInst = (*InstIter);
			addr = CurrInst->GetAddr();
			FirstBlockProcessed = CurrInst->IsLastInBlock();
			if (!EBPSaved) { // still looking for "push ebp"
				if (CurrInst->MDIsPushInstr() && CurrInst->GetOperand(0)->MatchesReg(MD_FRAME_POINTER_REG)) {
					EBPSaved = true;
				}
			}
			else if (!ESPintoEBP) { // found "push ebp", looking for "mov ebp,esp"
				if ((CurrInst->GetIDAOpcode() == STARS_NN_mov) 
					&& (CurrInst->GetFirstDef()->GetOp()->MatchesReg(MD_FRAME_POINTER_REG))
					&& (CurrInst->GetFirstUse()->GetOp()->MatchesReg(MD_STACK_POINTER_REG))) {
					ESPintoEBP = true;
					FirstBlockProcessed = true; // exit loop
				}
			}
			// We must get EBP set to its frame pointer value before we reach the
			//  local frame allocation instruction (i.e. the subtraction of locals space
			//   from the stack pointer).
			if (HasLocals) {
				FirstBlockProcessed |= (addr >= this->LocalVarsAllocInstr);
			}
			++InstIter;
		} while (!FirstBlockProcessed);
		// If we found ESPintoEBP, we also found EBPSaved first, and we need to change
		//  this->UseFP to true and return true. Otherwise, return false.
		this->UseFP = ESPintoEBP;
		bool changed = (ESPintoEBP != OldUseFP);
		if (changed)
			SMP_msg("INFO: MDFixUseFP toggled UseFP for %s\n", this->GetFuncName());
		return (changed);
#if 0
	} // end if (!(this->UseFP))

	// At this point, this->UseFP must have been true on entry to this method and we will
	//  check whether it should be reset to false.
	// NOTE(review): this doubly-disabled section relies on CurrInst and addr
	//  holding the values from the loop above — confirm before re-enabling.
	while (addr <= this->LocalVarsAllocInstr) {
		set<DefOrUse, LessDefUse>::iterator CurrDef = CurrInst->GetFirstDef();
		while (CurrDef != CurrInst->GetLastDef()) {
			if (CurrDef->GetOp()->MatchesReg(MD_FRAME_POINTER_REG))
				return false; // EBP got set before locals were allocated
			++CurrDef;
		}
		++InstIter;
		CurrInst = (*InstIter);
		addr = CurrInst->GetAddr();
	}
	// If we found no defs of the frame pointer before the local vars
	//  allocation, then the frame pointer register is not being used
	//  as a frame pointer, just as a general callee-saved register.
	this->UseFP = false;
	SMP_msg("INFO: MDFixUseFP reset UseFP to false for %s\n", this->GetFuncName());
	return true;
#endif
} // end of SMPFunction::MDFixUseFP()
#endif

// Find the callee-saved reg offsets (negative offset from return address)
//  for all registers pushed onto the stack before the stack frame allocation
//  instruction.
void SMPFunction::MDFindSavedRegs(void) {
	list<SMPInstr *>::iterator InstIter;
	int RegIndex;
	STARS_Function_t *CurrFunc = SMP_get_func(this->GetStartAddr());
	assert(NULL != CurrFunc);

	for (InstIter = this->Instrs.begin(); InstIter != this->Instrs.end(); ++InstIter) {
		SMPInstr *CurrInst = (*InstIter);
		if (CurrInst->IsMarkerInst())
			continue;
		if (CurrInst->GetAddr() > this->LocalVarsAllocInstr)
			break;
		if (!(CurrInst->MDIsPushInstr()))
			continue;

		STARS_sval_t CurrOffset = CurrInst->GetStackPtrOffset();
		if (CurrInst->GetIDAOpcode() == STARS_NN_push) {
			STARSOpndTypePtr PushedReg = CurrInst->GetPushedOpnd();
			if (PushedReg->IsRegOp()) {
				RegIndex = (int) PushedReg->GetReg();
				if (RegIndex > global_STARS_program->GetSTARS_MD_LAST_SAVED_REG_NUM()) {
					SMP_msg("WARNING: Skipping save of register %d\n", RegIndex);
					continue;
				}
				if (this->SavedRegLoc.at((std::size_t) RegIndex) == 0) {
					this->SavedRegLoc[(std::size_t) RegIndex] = CurrOffset - PushedReg->GetByteWidth();
				}
				else {
					SMP_msg("WARNING: Multiple saves of register %d\n", RegIndex);
				}
			} // end if register push operand
		} // end if PUSH instruction
		else if (STARS_NN_pusha == CurrInst->GetIDAOpcode()) {
			// NOTE: There is no push-all of the 64-bit regs, so these sizes are correct.
			this->SavedRegLoc[(std::size_t) STARS_x86_R_ax] = CurrOffset - 4;
			this->SavedRegLoc[(std::size_t) STARS_x86_R_cx] = CurrOffset - 8;
			this->SavedRegLoc[(std::size_t) STARS_x86_R_dx] = CurrOffset - 12;
			this->SavedRegLoc[(std::size_t) STARS_x86_R_bx] = CurrOffset - 16;
			this->SavedRegLoc[(std::size_t) STARS_x86_R_sp] = CurrOffset - 20;
			this->SavedRegLoc[(std::size_t) STARS_x86_R_bp] = CurrOffset - 24;
			this->SavedRegLoc[(std::size_t) STARS_x86_R_si] = CurrOffset - 28;
			this->SavedRegLoc[(std::size_t) STARS_x86_R_di] = CurrOffset - 32;
			break; // all regs accounted for
		}
		else if (CurrInst->MDIsEnterInstr()) {
			this->SavedRegLoc[(std::size_t) STARS_x86_R_bp] = CurrOffset - global_STARS_program->GetSTARS_ISA_Bytewidth();
		}
	} // end for all instructions

	return;
} // end of SMPFunction::MDFindSavedRegs()

// Compute the ReturnRegTypes[] as the meet over all register types
//  at all return instructions. Return true if any type was updated.
bool SMPFunction::MDFindReturnTypes(void) {
	list<SMPBasicBlock *>::iterator BlockIter;
	SMPBasicBlock *CurrBlock;
	vector<SMPInstr *>::iterator InstIter;
	SMPInstr *CurrInst;
	bool changed = false;

	for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
		CurrBlock = (*BlockIter);
		if (CurrBlock->HasReturn()) {
			// Get the types of all registers at the RETURN point.
			//  Calculate the meet function over them.
			InstIter = --(CurrBlock->GetLastInst());
			CurrInst = (*InstIter);
			assert(RETURN == CurrInst->GetDataFlowType());
			set<DefOrUse, LessDefUse>::iterator CurrUse;
			for (CurrUse = CurrInst->GetFirstUse(); CurrUse != CurrInst->GetLastUse(); ++CurrUse) {
				STARSOpndTypePtr UseOp = CurrUse->GetOp();
				if ((! UseOp->IsRegOp()) || (global_STARS_program->GetSTARS_MD_LAST_SAVED_REG_NUM() < UseOp->GetReg()))
					continue;
				SMPOperandType OldType = this->ReturnRegTypes.at(UseOp->GetReg());
				bool TypeErrorFlag = false;
				SMPOperandType NewType = SMPTypeMeet(OldType, CurrUse->GetType(), TypeErrorFlag);
				if (TypeErrorFlag) {
					SMP_msg("ERROR: TypeMeet error in MDFindReturnTypes() func at %llx return at %llx\n",
						(uint64_t) this->GetFirstFuncAddr(), (uint64_t) CurrInst->GetAddr());
				}
				if (NewType != OldType) {
					this->ReturnRegTypes[UseOp->GetReg()] = NewType;
					changed = true;
				}
				// Get the FGInfo for each USE as well.
				int UseSSANum = CurrUse->GetSSANum();
				int DefHashIndex = HashGlobalNameAndSSA(UseOp, UseSSANum);
				struct FineGrainedInfo RegDefFG;
				if (this->IsGlobalName(UseOp)) {
					RegDefFG = this->GetDefFGInfo(DefHashIndex);
				}
				else {
					RegDefFG = CurrBlock->GetDefFGInfo(DefHashIndex);
				}
				// Meet function for FGInfo is a simple bitwise OR operation.
				this->ReturnRegFGInfo[UseOp->GetReg()].SignMiscInfo |= RegDefFG.SignMiscInfo;
				this->ReturnRegFGInfo[UseOp->GetReg()].SizeInfo |= RegDefFG.SizeInfo;
			} // for all USEs in the RETURN instruction
		} // end if current block has a RETURN
	} // end for all blocks
	return changed;
} // end of SMPFunction::MDFindReturnTypes()

// Fill IncomingRegTypes[]
void SMPFunction::MDFindIncomingTypes(void) {
	// For each register USE in the marker inst at the top of the function,
	//  see if we have a consistent type inference at all call sites (i.e.
	//  the USEs of the call instructions that call this function).
	set<STARS_ea_t>::iterator CallSiteIter;
	for (CallSiteIter = this->AllCallSites.begin(); CallSiteIter != this->AllCallSites.end(); ++CallSiteIter) {
		STARS_ea_t CallSiteAddr = (*CallSiteIter);
		STARS_Function_t *CurrFunc = SMP_get_func(CallSiteAddr);
		assert(NULL != CurrFunc);
		STARS_ea_t CallerFirstAddr = CurrFunc->get_startEA();
		SMPFunction *CallerFunc = this->GetProg()->FindFunction(CallerFirstAddr);
		if (nullptr == CallerFunc) {
			SMP_msg("FATAL ERROR: Program has no function at %llx which should be a caller of %llx\n",
				(unsigned long long) CallerFirstAddr, (unsigned long long) this->GetFirstFuncAddr());
		}
		assert(NULL != CallerFunc);
		SMPInstr *CallSiteInst = CallerFunc->GetInstFromAddr(CallSiteAddr);
		// Iterate through USE regs in our marker inst, match them to USEs in call inst.
		SMPInstr *MarkerInst = this->Instrs.front();
		assert(MarkerInst->IsMarkerInst());
		for (set<DefOrUse, LessDefUse>::iterator MarkerDefIter = MarkerInst->GetFirstDef(); MarkerDefIter != MarkerInst->GetLastDef(); ++MarkerDefIter) {
			STARSOpndTypePtr MarkerDefOp = MarkerDefIter->GetOp();
			if (MarkerDefOp->IsRegOp()) {
				// Find same USE in caller inst.
				STARS_regnum_t RegNum = MarkerDefOp->GetReg();
				set<DefOrUse, LessDefUse>::iterator CallSiteUseIter = CallSiteInst->FindUse(MarkerDefOp);
				if ((CallSiteUseIter != CallSiteInst->GetLastUse()) && (RegNum < (decltype(RegNum))this->IncomingRegTypes.size())) {
					// Found it. See if we have type info.
					SMPOperandType CallSiteType = CallSiteUseIter->GetType();
					SMPOperandType OldType = this->IncomingRegTypes[RegNum];
					bool TypeErrorFlag = false;
					this->IncomingRegTypes[RegNum] = SMPTypeMeet(CallSiteType, OldType, TypeErrorFlag);
					if (TypeErrorFlag) {
						SMP_msg("ERROR: TypeMeet error in MDFindIncomingTypes() at %llx\n",
							(uint64_t) this->GetFirstFuncAddr());
					}
					// Note this type meet function is used in an aggressive way here, as
					//  a mixture of UNINIT and NUMERIC at various call sites will produce NUMERIC, for example.
					//  We are not demanding that all call sites have a consistent type with no UNINIT instances.

					// Get the FGInfo (from call site in CallerFunc) for each USE as well.
					int UseSSANum = CallSiteUseIter->GetSSANum();
					int DefHashIndex = HashGlobalNameAndSSA(MarkerDefOp, UseSSANum);
					struct FineGrainedInfo RegDefFG;
					if (CallerFunc->IsGlobalName(MarkerDefOp)) {
						RegDefFG = CallerFunc->GetDefFGInfo(DefHashIndex);
					}
					else {
						RegDefFG = CallSiteInst->GetBlock()->GetDefFGInfo(DefHashIndex);
					}
					// Meet function for FGInfo is a simple bitwise OR operation.
					(void) MarkerInst->UpdateDefOpFGInfo(MarkerDefOp, RegDefFG);
				}
			}
		}
	}
	return;
} // end of SMPFunction::MDFindIncomingTypes()

// Determine boundaries in the stack frame.
void SMPFunction::BuildLocalVarTable(void) {
	// All of the work is delegated to the stack access table builder.
	this->BuildStackAccessTables();
} // end of SMPFunction::BuildLocalVarTable()

// Build tables to characterize stack accesses.
void SMPFunction::BuildStackAccessTables(void) {

	STARS_Function_t *FuncPtr = SMP_get_func(this->GetStartAddr());
	if (NULL == FuncPtr) {
		SMP_msg("FATAL ERROR in SMPFunction::SemiNaiveLocalVarID; no func ptr\n");
	}
	assert(NULL != FuncPtr);

	// If AnalyzedSP is false, that is all we can do.
	if (!this->AnalyzedSP) {
		this->OutgoingArgsSize = 0;
		this->MinStackDelta = 0;
		this->AllocPointDelta = 0;
		return;
	}

	// Calculate min and max stack operand offsets accessed.
	list<SMPInstr *>::iterator InstIter;
	InstIter = this->Instrs.begin();
#if SMP_USE_SSA_FNOP_MARKER
	if ((*InstIter)->IsMarkerInst())
		++InstIter;  // skip marker instruction
#endif
	for (; InstIter != this->Instrs.end(); ++InstIter) {
		SMPInstr *CurrInst = (*InstIter);
		STARS_ea_t addr = CurrInst->GetAddr();
		// Find the min and max stack offsets in DEFs and USEs.
		if (CurrInst->HasDestMemoryOperand() || CurrInst->MDIsPushInstr() || CurrInst->MDIsEnterInstr()) {
			set<DefOrUse, LessDefUse>::iterator CurrDef;
			for (CurrDef = CurrInst->GetFirstDef(); CurrDef != CurrInst->GetLastDef(); ++CurrDef) {
				STARSOpndTypePtr TempOp = CurrDef->GetOp();
				if ((!TempOp->IsMemNoDisplacementOp()) && (!TempOp->IsMemDisplacementOp()))
					continue;
				this->UpdateMinMaxStackOffsets(CurrInst, TempOp);
			} // end for all DEFs
		}
		if (CurrInst->HasSourceMemoryOperand() || CurrInst->MDIsPopInstr() || CurrInst->MDIsLeaveInstr() || CurrInst->MDIsLoadEffectiveAddressInstr()) {
			if (CurrInst->MDIsLoadEffectiveAddressInstr()) {
				STARSOpndTypePtr TempOp = CurrInst->GetLeaMemUseOp();
				if (nullptr != TempOp) {
					if (((TempOp->IsMemNoDisplacementOp()) || (TempOp->IsMemDisplacementOp())) && (!(CurrInst->IsRegClearIdiom() || CurrInst->IsNop())))  {
						this->UpdateMinMaxStackOffsets(CurrInst, TempOp);
					}
				}
			}
			else {
				set<DefOrUse, LessDefUse>::iterator CurrUse;
				for (CurrUse = CurrInst->GetFirstUse(); CurrUse != CurrInst->GetLastUse(); ++CurrUse) {
					STARSOpndTypePtr TempOp = CurrUse->GetOp();
					if ((!TempOp->IsMemNoDisplacementOp()) && (!TempOp->IsMemDisplacementOp()))
						continue;
					this->UpdateMinMaxStackOffsets(CurrInst, TempOp);
				} // end for all USEs
			}
		}
	} // end for all instructions
	if (0 == this->MaxStackAccessLimit) {
		// Never accessed any incoming args. However, we know the return address is on the stack,
		//  and it is almost never accessed, so we want to record its presence.
		this->MaxStackAccessLimit = MD_DEFAULT_RETURN_ADDRESS_SIZE;
	}
	if (this->MinStackAccessOffset > this->MinStackDelta) {
		// Some functions allocate space that is not visibly accessed. We still want to make
		//  our stack frame maps of maximum size, and MinStackDelta is used for normalizing offsets.
		this->MinStackAccessOffset = this->MinStackDelta;
	}
	else if (this->MinStackAccessOffset < this->MinStackDelta) {
		// x86-64 leaf functions are often optimized by not allocating a stack frame. Instead,
		//  negative displacements from the stack pointer are used to access locals. So the
		//  stack pointer (reflected in MinStackDelta) never goes down as far as the bottom of
		//  the frame (reflected by MinStackAccessOffset). We need to record that such a function
		//  has been detected so that we don't fail assertions unnecessarily later.
		this->SetStackFrameExtendsPastStackTop();
	}

	if (!this->AnalyzedSP || (STARS_BADADDR == this->LocalVarsAllocInstr)) {
		SMP_msg("FindOutgoingArgsSize not called for %s ", this->GetFuncName());
		SMP_msg("AnalyzedSP: %d CallsAlloca: %d LocalVarsAllocInstr: %llx \n",
			this->AnalyzedSP, this->CallsAlloca, (unsigned long long) this->LocalVarsAllocInstr);
		return;
	}
	

	// Now, do the mapping of instruction stack accesses to the table entries.
	unsigned short BitWidthMask;
	bool DebugFlag = false;
	int SignedOffset;

	if ((0 <= this->MinStackDelta) || (0 <= this->AllocPointDelta)) {
		// No allocations; sometimes happens in library functions.
		this->OutgoingArgsSize = 0;
		this->AllocPointDelta = 0;
		if ((this->MinStackDelta > this->MaxStackDelta) || (0 < this->MinStackDelta)) {
			this->MinStackDelta = 0;
		}
	}
	assert(0 >= this->MinStackDelta);

	// Allocate vectors of stack frame entries, one for each byte of the stack frame.
	// One vector will be for negative stack offsets (e.g. saved regs, local vars) and
	//  the other vector will be for nonnegative offsets (e.g. return address, inargs).
	// Two more vectors will hold fine-grained stack access info.
	//  These will be our memory maps for analyzing stack usage.
	STARS_sval_t AccessTableEntriesCount = this->MaxStackAccessLimit - this->MinStackAccessOffset;
	for (int i = this->MinStackAccessOffset; i < this->MaxStackAccessLimit; ++i) {
		struct StackFrameEntry TempEntry;
		TempEntry.VarPtr = NULL;
		TempEntry.offset = (long) i;
		TempEntry.Read = false;
		TempEntry.Written = false;
		TempEntry.AddressTaken = false;
		TempEntry.ESPRelativeAccess = false;
		TempEntry.EBPRelativeAccess = false;
		TempEntry.IndexedAccess = false;
		struct FineGrainedInfo TempFineGrained;
		TempFineGrained.SignMiscInfo = 0;
		TempFineGrained.SizeInfo = 0;
		if (i < 0) {
			TempEntry.EntryType = STARS_STACK_UNKNOWN;
			this->NegativeOffsetStackFrameMap.push_back(TempEntry);
			this->NegativeOffsetFineGrainedStackTable.push_back(TempFineGrained);
		}
		else {
			if (i < ((int) MD_DEFAULT_RETURN_ADDRESS_SIZE))
				TempEntry.EntryType = STARS_STACK_RETURN_ADDRESS;
			else
				TempEntry.EntryType = STARS_STACK_INARG;
			this->PositiveOffsetStackFrameMap.push_back(TempEntry);
			this->PositiveOffsetFineGrainedStackTable.push_back(TempFineGrained);
		}
	}

	if (0 < this->AllocPointDelta) {
		SMP_msg("FATAL ERROR: AllocPointDelta = %ld in %s\n", (long) this->AllocPointDelta, this->GetFuncName());
	}
	assert(0 >= this->AllocPointDelta);

	// Iterate through all instructions and record stack frame accesses in the StackFrameMaps.
	InstIter = this->Instrs.begin();
#if SMP_USE_SSA_FNOP_MARKER
	if ((*InstIter)->IsMarkerInst())
		++InstIter;  // skip marker instruction
#endif
	for (; InstIter != this->Instrs.end(); ++InstIter) {
		SMPInstr *CurrInst = (*InstIter);
		STARS_ea_t InstAddr = CurrInst->GetAddr();
		STARS_sval_t sp_delta = CurrInst->GetStackPtrOffset();
		if (0 < sp_delta) {
			// Stack underflow.
			SMP_msg("WARNING: Stack underflow at %llx %s sp_delta: %ld\n", (unsigned long long) InstAddr,
				CurrInst->GetDisasm(), (long)sp_delta);
#if 0
			this->OutgoingArgsComputed = false;
			this->OutgoingArgsSize = 0;
			return;
#endif
		}
		// assert(0 >= sp_delta);
		STARS_ea_t offset;
		std::size_t DataSize;
		bool UsedFramePointer;
		bool IndexedAccess;
		bool SignedMove;
		bool UnsignedMove;
		if (CurrInst->HasDestMemoryOperand() || CurrInst->MDIsPushInstr()) {
			set<DefOrUse, LessDefUse>::iterator CurrDef;
			for (CurrDef = CurrInst->GetFirstDef(); CurrDef != CurrInst->GetLastDef(); ++CurrDef) {
				STARSOpndTypePtr TempOp = CurrDef->GetOp();
				if ((!TempOp->IsMemNoDisplacementOp()) && (!TempOp->IsMemDisplacementOp()))
					continue;
				if (this->MDGetStackOffsetAndSize(CurrInst, TempOp, this->MinStackAccessOffset, offset, DataSize, UsedFramePointer,
					IndexedAccess, SignedMove, UnsignedMove)) {
					SignedOffset = (int) offset;
					if (IndexedAccess && ((0 > SignedOffset) || ((SignedOffset + (int) DataSize) > ((int) AccessTableEntriesCount)))) {
						continue; // Indexed expressions can be within frame even when offset is outside frame
					}
					assert(0 <= SignedOffset);
					SignedOffset += this->MinStackAccessOffset;

					bool ESPRelative = (!(UsedFramePointer || CurrInst->HasFPNormalizedToSP()));
					if (SignedOffset < 0) {
						assert((offset + DataSize - 1) < this->NegativeOffsetStackFrameMap.size());
						for (int j = 0; j < (int) DataSize; ++j) { // offset has zero-based index into negative offset vectors
							this->NegativeOffsetStackFrameMap[offset + j].Written = true;
							this->NegativeOffsetStackFrameMap[offset + j].IndexedAccess = IndexedAccess;
							if (ESPRelative) {
								this->NegativeOffsetStackFrameMap[offset + j].ESPRelativeAccess = true;
							}
							else {
								this->NegativeOffsetStackFrameMap[offset + j].EBPRelativeAccess = true;
							}
						}
					}
					else {
						assert((SignedOffset + DataSize - 1) < this->PositiveOffsetStackFrameMap.size());
						for (int j = 0; j < (int) DataSize; ++j) {
							this->PositiveOffsetStackFrameMap[SignedOffset + j].Written = true;
							this->PositiveOffsetStackFrameMap[SignedOffset + j].IndexedAccess = IndexedAccess;
							if (ESPRelative) {
								this->PositiveOffsetStackFrameMap[SignedOffset + j].ESPRelativeAccess = true;
							}
							else {
								this->PositiveOffsetStackFrameMap[SignedOffset + j].EBPRelativeAccess = true;
							}
						}
					}
					struct FineGrainedInfo StackDefFG;
					BitWidthMask = ComputeOperandBitWidthMask(TempOp, DataSize);
					StackDefFG.SizeInfo = BitWidthMask;
					StackDefFG.SignMiscInfo = FG_MASK_WRITTEN;
					if (SignedOffset < 0) { // offset has zero-based index into negative offset vectors
						assert(offset < this->NegativeOffsetFineGrainedStackTable.size());
						this->NegativeOffsetFineGrainedStackTable[offset].SizeInfo |= BitWidthMask;
						this->NegativeOffsetFineGrainedStackTable[offset].SignMiscInfo |= FG_MASK_WRITTEN;
						if (IndexedAccess) {
							this->NegativeOffsetFineGrainedStackTable[offset].SignMiscInfo |= FG_MASK_INDEXED_ACCESS;
							StackDefFG.SignMiscInfo |= FG_MASK_INDEXED_ACCESS;
						}
						if (ESPRelative) {
							this->NegativeOffsetFineGrainedStackTable[offset].SignMiscInfo |= FG_MASK_SP_RELATIVE;
							StackDefFG.SignMiscInfo |= FG_MASK_SP_RELATIVE;
						}
						else {
							this->NegativeOffsetFineGrainedStackTable[offset].SignMiscInfo |= FG_MASK_FP_RELATIVE;
							StackDefFG.SignMiscInfo |= FG_MASK_FP_RELATIVE;
						}
					}
					else { // SignedOffset >= 0
						assert((size_t)SignedOffset < this->PositiveOffsetFineGrainedStackTable.size());
						this->PositiveOffsetFineGrainedStackTable[SignedOffset].SizeInfo |= BitWidthMask;
						this->PositiveOffsetFineGrainedStackTable[SignedOffset].SignMiscInfo |= FG_MASK_WRITTEN;
						if (IndexedAccess) {
							this->PositiveOffsetFineGrainedStackTable[SignedOffset].SignMiscInfo |= FG_MASK_INDEXED_ACCESS;
							StackDefFG.SignMiscInfo |= FG_MASK_INDEXED_ACCESS;
						}
						if (ESPRelative) {
							this->PositiveOffsetFineGrainedStackTable[SignedOffset].SignMiscInfo |= FG_MASK_SP_RELATIVE;
							StackDefFG.SignMiscInfo |= FG_MASK_SP_RELATIVE;
						}
						else {
							this->PositiveOffsetFineGrainedStackTable[SignedOffset].SignMiscInfo |= FG_MASK_FP_RELATIVE;
							StackDefFG.SignMiscInfo |= FG_MASK_FP_RELATIVE;
						}
					}
					// We will process the signedness of stores later, so that loads can take precedence
					//  over stores in determining signedness in the table. We go ahead and process
					//  signedness for the separate DEF and USE maps by InstAddr.
					if (SignedMove) {
						StackDefFG.SignMiscInfo |= FG_MASK_SIGNED;
					}
					else if (UnsignedMove) {
						StackDefFG.SignMiscInfo |= FG_MASK_UNSIGNED;
					}
					if (!CurrInst->MDIsPushInstr()) { // could just exclude push-all, which had multiple DEFs per InstAddr
						// Insert the StackDefFG into the map of InstAddr to DEF FG info.
						pair<map<STARS_ea_t, struct FineGrainedInfo>::iterator, bool> InsertResult;
						pair<STARS_ea_t, struct FineGrainedInfo> InsertValue(InstAddr, StackDefFG);
						InsertResult = this->StackDefFGInfo.insert(InsertValue);
						if (!InsertResult.second) {
							SMP_msg("FATAL ERROR: Duplicate item in StackDefFGInfo at %llx\n", (unsigned long long) InstAddr);
							SMP_msg("Inst dump follows:\n");
							CurrInst->Dump();
						}
						assert(InsertResult.second);
					}

					// Find saves of registers.
					if ((0 == CurrInst->GetBlock()->GetNumber()) && CurrInst->MDIsPushInstr()) { // push in entry block
						STARSOpndTypePtr PushedOp = CurrInst->GetPushedOpnd();
						if (PushedOp->IsRegOp()) {
							if (SignedOffset < 0) {
								for (int j = 0; j < (int)DataSize; ++j) {
									this->NegativeOffsetStackFrameMap[offset + j].EntryType = STARS_STACK_CALLEE_SAVED_REG;
								}
							}
							else { // odd to have callee-saved regs up here
								for (int j = 0; j < (int)DataSize; ++j) {
									this->PositiveOffsetStackFrameMap[SignedOffset + j].EntryType = STARS_STACK_CALLEE_SAVED_REG;
								}
							}
						}
					}
				} // end if MDGetStackOffsetAndSize()
			} // end for all DEFs
		} // end if DestMemoryOperand

		if (CurrInst->HasSourceMemoryOperand()) {
			set<DefOrUse, LessDefUse>::iterator CurrUse;
			for (CurrUse = CurrInst->GetFirstUse(); CurrUse != CurrInst->GetLastUse(); ++CurrUse) {
				STARSOpndTypePtr TempOp = CurrUse->GetOp();
				if ((!TempOp->IsMemNoDisplacementOp()) && (!TempOp->IsMemDisplacementOp()))
					continue;
				if (this->MDGetStackOffsetAndSize(CurrInst, TempOp, this->MinStackAccessOffset, offset, DataSize, UsedFramePointer,
					IndexedAccess, SignedMove, UnsignedMove)) {
					SignedOffset = (int) offset;
					if (IndexedAccess && ((0 > SignedOffset) || ((SignedOffset + (int) DataSize) > ((int) AccessTableEntriesCount)))) {
						continue; // Indexed expressions can be within frame but offset is outside frame
					}
					assert(0 <= SignedOffset);
					SignedOffset += this->MinStackAccessOffset;

					bool ESPRelative = (!(UsedFramePointer || CurrInst->HasFPNormalizedToSP()));
					if (SignedOffset < 0) { // offset has zero-based index into negative offset vectors
						assert((offset + DataSize - 1) < this->NegativeOffsetStackFrameMap.size());
						for (int j = 0; j < (int) DataSize; ++j) {
							this->NegativeOffsetStackFrameMap[offset + j].Read = true;
							this->NegativeOffsetStackFrameMap[offset + j].IndexedAccess = IndexedAccess;
							if (ESPRelative) {
								this->NegativeOffsetStackFrameMap[offset + j].ESPRelativeAccess = true;
							}
							else {
								this->NegativeOffsetStackFrameMap[offset + j].EBPRelativeAccess = true;
							}
						}
					}
					else {
						assert((SignedOffset + DataSize - 1) < this->PositiveOffsetStackFrameMap.size());
						for (int j = 0; j < (int)DataSize; ++j) {
							this->PositiveOffsetStackFrameMap[SignedOffset + j].Read = true;
							this->PositiveOffsetStackFrameMap[SignedOffset + j].IndexedAccess = IndexedAccess;
							if (ESPRelative) {
								this->PositiveOffsetStackFrameMap[SignedOffset + j].ESPRelativeAccess = true;
							}
							else {
								this->PositiveOffsetStackFrameMap[SignedOffset + j].EBPRelativeAccess = true;
							}
						}
					}
					struct FineGrainedInfo StackUseFG;
					BitWidthMask = ComputeOperandBitWidthMask(TempOp, DataSize);
					StackUseFG.SizeInfo = BitWidthMask;
					StackUseFG.SignMiscInfo = FG_MASK_READ;
					if (SignedOffset < 0) { // offset has zero-based index into negative offset vectors
						assert(offset < this->NegativeOffsetFineGrainedStackTable.size());
						this->NegativeOffsetFineGrainedStackTable[offset].SizeInfo |= BitWidthMask;
						this->NegativeOffsetFineGrainedStackTable[offset].SignMiscInfo |= FG_MASK_READ;
						if (IndexedAccess) {
							this->NegativeOffsetFineGrainedStackTable[offset].SignMiscInfo |= FG_MASK_INDEXED_ACCESS;
							StackUseFG.SignMiscInfo |= FG_MASK_INDEXED_ACCESS;
						}
						if (ESPRelative) {
							this->NegativeOffsetFineGrainedStackTable[offset].SignMiscInfo |= FG_MASK_SP_RELATIVE;
							StackUseFG.SignMiscInfo |= FG_MASK_SP_RELATIVE;
						}
						else {
							this->NegativeOffsetFineGrainedStackTable[offset].SignMiscInfo |= FG_MASK_FP_RELATIVE;
							StackUseFG.SignMiscInfo |= FG_MASK_FP_RELATIVE;
						}
						if (SignedMove) {
							this->NegativeOffsetFineGrainedStackTable[offset].SignMiscInfo |= FG_MASK_SIGNED;
							StackUseFG.SignMiscInfo |= FG_MASK_SIGNED;
						}
						else if (UnsignedMove) {
							this->NegativeOffsetFineGrainedStackTable[offset].SignMiscInfo |= FG_MASK_UNSIGNED;
							StackUseFG.SignMiscInfo |= FG_MASK_UNSIGNED;
						}
					}
					else { // SignedOffset >= 0
						assert(SignedOffset < (decltype(SignedOffset))this->PositiveOffsetFineGrainedStackTable.size());
						this->PositiveOffsetFineGrainedStackTable[SignedOffset].SizeInfo |= BitWidthMask;
						this->PositiveOffsetFineGrainedStackTable[SignedOffset].SignMiscInfo |= FG_MASK_READ;
						if (IndexedAccess) {
							this->PositiveOffsetFineGrainedStackTable[SignedOffset].SignMiscInfo |= FG_MASK_INDEXED_ACCESS;
							StackUseFG.SignMiscInfo |= FG_MASK_INDEXED_ACCESS;
						}
						if (ESPRelative) {
							this->PositiveOffsetFineGrainedStackTable[SignedOffset].SignMiscInfo |= FG_MASK_SP_RELATIVE;
							StackUseFG.SignMiscInfo |= FG_MASK_SP_RELATIVE;
						}
						else {
							this->PositiveOffsetFineGrainedStackTable[SignedOffset].SignMiscInfo |= FG_MASK_FP_RELATIVE;
							StackUseFG.SignMiscInfo |= FG_MASK_FP_RELATIVE;
						}
						if (SignedMove) {
							this->PositiveOffsetFineGrainedStackTable[SignedOffset].SignMiscInfo |= FG_MASK_SIGNED;
							StackUseFG.SignMiscInfo |= FG_MASK_SIGNED;
						}
						else if (UnsignedMove) {
							this->PositiveOffsetFineGrainedStackTable[SignedOffset].SignMiscInfo |= FG_MASK_UNSIGNED;
							StackUseFG.SignMiscInfo |= FG_MASK_UNSIGNED;
						}
					}
					// Insert the StackUseFG into the map of InstAddr to USE FG info.
					pair<map<STARS_ea_t, struct FineGrainedInfo>::iterator, bool> InsertResult;
					pair<STARS_ea_t, struct FineGrainedInfo> InsertValue(InstAddr, StackUseFG);
					InsertResult = this->StackUseFGInfo.insert(InsertValue);
					assert(InsertResult.second);
				} // end if MDGetStackOffsetAndSize()
			} // end for all USEs
		} // end if SourceMemoryOperand
		// NOTE: Detect taking the address of stack locations. **!!**
	} // end for all instructions

	this->GoodLocalVarTable = true;
	this->OutgoingArgsComputed = true;

	// If function is a leaf function, set OutgoingArgsSize to zero and return.
	// If function has no local frame allocation, ditto.
	if ((this->IsLeaf() && !(this->IsDirectlyRecursive())) || (this->AllocPointDelta == 0)) {
		this->OutgoingArgsSize = 0;
		if (0 == this->AllocPointDelta)
			return;
	}
	else {

		// For non-leaf functions, set the OutgoingArgsSize to the write-only, ESP-relative
		//  region of the bottom of the NegativeOffsetStackFrameMap.
		bool OutgoingArgsRegionFinished = false;
		bool IndexedOutgoingArgs = false; // Any indexed accesses to outgoing args?
		std::size_t FramePadSize = 0;
		std::size_t AlignmentPadSize = 0; // bottom of frame, unused space below outargs
		for (std::size_t MapIndex = 0; MapIndex < this->NegativeOffsetStackFrameMap.size(); ++MapIndex) {
			// Some of the bottom of the stack frame might be below the local frame allocation.
			//  These are pushes that happened after allocation, etc. We skip over these
			//  locations and define the outgoing args region to start strictly at the bottom
			//  of the local frame allocation.
			struct StackFrameEntry TempEntry = this->NegativeOffsetStackFrameMap[MapIndex];
#if 0
			if (DebugFlag) {
				SMP_msg("StackFrameMap entry %zu: offset: %ld Read: %d Written: %d ESP: %d EBP: %d\n",
					MapIndex, TempEntry.offset, TempEntry.Read, TempEntry.Written,
					TempEntry.ESPRelativeAccess, TempEntry.EBPRelativeAccess);
			}
#endif
			if (TempEntry.offset < this->AllocPointDelta)
				continue; // don't look below the stack frame allocation region
			if (STARS_STACK_CALLEE_SAVED_REG == this->NegativeOffsetStackFrameMap[MapIndex].EntryType)
				break;

			if (OutgoingArgsRegionFinished) {
				// We are just processing the stack frame padding.
				if (!TempEntry.Read && !TempEntry.Written) {
					// Could be stack frame padding.
					++FramePadSize;
				}
				else {
					break; // No more padding region
				}
			}
			else if ((this->OutgoingArgsSize == 0) && (!TempEntry.Read) && (!TempEntry.Written)) {
				// We have not started accumulating outgoing args bytes, we have reached the
				//  AllocPointDelta, yet we find space that is neither written nor read. This
				//  empty space at the bottom of the stack frame could just be for stack alignment
				//  purposes, especially in the new x86-64 ABI, so it should not prevent us from
				//  finding outgoing args space above it.
				++AlignmentPadSize;
			}
			else if (TempEntry.Read || TempEntry.EBPRelativeAccess || !TempEntry.Written
				|| !TempEntry.ESPRelativeAccess) {
				OutgoingArgsRegionFinished = true;
				if (!TempEntry.Read && !TempEntry.Written) {
					// Could be stack frame padding.
					++FramePadSize;
				}
				else {
					break; // No padding region
				}
			}
			else {
				this->OutgoingArgsSize++;
				if (TempEntry.IndexedAccess) {
					IndexedOutgoingArgs = true;
				}
			}
		}

		// Add in the alignment padding below the written outargs region.
		if (this->OutgoingArgsSize > 0) {
			this->OutgoingArgsSize += AlignmentPadSize;
		}

		// If any outgoing arg was accessed using an index register, then we don't know how high
		//  the index register value went. It could potentially consume the so-called padding
		//  region, which might be just the region we did not detect direct accesses to because
		//  the accesses were indirect. To be safe, we expand the outgoing args region to fill
		//  the padding region above it in this indexed access case.
		if (IndexedOutgoingArgs) {
			this->OutgoingArgsSize += FramePadSize;
		}

		// Mark the outgoing args entries in the map.
		// Could have stack accesses below stack frame allocation. Don't go below the frame allocation for outargs
		//  !!!!****!!!! Might need to change for Windows and other stack frame conventions.
		int AllocIndex = ((int) this->NegativeOffsetStackFrameMap.size()) + (int) this->AllocPointDelta;
		// e.g. if 20 bytes in map but -16 is AllocPointDelta, then bottom four bytes are below the frame.
		assert(0 <= AllocIndex);
		for (size_t index = (size_t) AllocIndex; index < (AllocIndex + this->OutgoingArgsSize); ++index) {
			this->NegativeOffsetStackFrameMap[index].EntryType = STARS_STACK_OUTARG;
		}
	}

	// Mark the local vars entries in the map.
	bool LocalFrameMarked = false;
	int AllocIndex = ((int) this->NegativeOffsetStackFrameMap.size()) + (int) this->AllocPointDelta;
	// e.g. if 20 bytes in map but -16 is AllocPointDelta, then bottom four bytes are below the frame.
	assert(0 <= AllocIndex);
	for (size_t index = AllocIndex + this->OutgoingArgsSize; index < this->NegativeOffsetStackFrameMap.size(); ++index) {
		if (this->NegativeOffsetStackFrameMap[index].EntryType == STARS_STACK_UNKNOWN) {
			this->NegativeOffsetStackFrameMap[index].EntryType = STARS_STACK_LOCAL_FRAME;
			LocalFrameMarked = true;
		}
		else if (LocalFrameMarked) {
			// Marked some local frame entries, and now we are running up into something besides the local frame,
			//  like callee-saved regs. We don't want to go above them and mark a second local frame area, which might
			//  be a save from memory, for example: push [ecx+4] ; push ebx; push edx; sub esp,32 should be treated as
			//  4 unknown bytes at top, 8 callee-saved reg bytes, then 32 local frame bytes. We don't want to keep
			//  processing and mark the 4 unknown bytes as another local var frame.
			break;
		}
	}

	return;

} // end of SMPFunction::BuildStackAccessTables()

// Widen MinStackAccessOffset / MaxStackAccessLimit to cover any stack access made by TempOp.
void SMPFunction::UpdateMinMaxStackOffsets(SMPInstr *CurrInst, const STARSOpndTypePtr &TempOp) {
	STARS_ea_t AccessOffset;
	std::size_t AccessSize;
	bool FPRelative;
	bool Indexed;
	bool SignExtended;
	bool ZeroExtended;

	// MDGetStackOffsetAndSize() returns false for non-stack operands; nothing to do then.
	if (this->MDGetStackOffsetAndSize(CurrInst, TempOp, this->MinStackDelta, AccessOffset, AccessSize,
		FPRelative, Indexed, SignExtended, ZeroExtended)) {
		// Convert the zero-based table index back to a true stack offset for min/max comparison.
		int TrueOffset = ((int) AccessOffset) + ((int) this->MinStackDelta);
		STARS_sval_t LowerBound = (STARS_sval_t) TrueOffset;
		STARS_sval_t UpperLimit = (STARS_sval_t) (TrueOffset + (int) AccessSize);
		if (LowerBound < this->MinStackAccessOffset) {
			this->MinStackAccessOffset = LowerBound;
		}
		if (UpperLimit > this->MaxStackAccessLimit) {
			this->MaxStackAccessLimit = UpperLimit;
		}
	}
	return;
} // end of SMPFunction::UpdateMinMaxStackOffsets()


// If TempOp reads or writes to a stack location, return the offset (relative to the initial
//  stack pointer value) and the size in bytes of the data access. Also return whether the
//  access was frame-pointer-relative, and whether signedness can be inferred due to a load
//  from the stack being zero-extended or sign-extended.
// NOTE: This function assumes that offsets are already normalized. i.e. the TempOp argument
//  should always come from a DEF or USE that has been normalized to the stack delta at function entry.
// NOTE: TempOp must be of type o_displ or o_phrase, as no other operand type could be a
//  stack memory access.
// BaseValue is either this->MinStackAccessOffset, or this->MinStackDelta (when this->MinStackAccessOffset is still
//  being computed).
// Return true if a stack memory access was found in TempOp, false otherwise.
bool SMPFunction::MDGetStackOffsetAndSize(SMPInstr *Instr, const STARSOpndTypePtr &TempOp, STARS_sval_t BaseValue, STARS_ea_t &offset, std::size_t &DataSize, bool &FP,
										  bool &Indexed, bool &Signed, bool &Unsigned) {
	int BaseReg;
	int IndexReg;
	uint16_t ScaleFactor;
	int SignedOffset;
	STARS_sval_t sp_delta = Instr->GetStackPtrOffset();
	STARS_ea_t InstAddr = Instr->GetAddr(); // helps debugging

	// Only memory operands with a base/index register expression can be stack accesses.
	if (nullptr == TempOp)
		return false;
	if (!((TempOp->IsMemNoDisplacementOp()) || (TempOp->IsMemDisplacementOp())))
		return false;
	MDExtractAddressFields(TempOp, BaseReg, IndexReg, ScaleFactor, offset);

	if (TempOp->IsMemNoDisplacementOp()) {
		assert(offset == 0);  // implicit zero, as in [esp] ==> [esp+0]
	}

	SignedOffset = (int) offset;  // avoid sign errors during adjustment arithmetic

	if ((BaseReg == STARS_x86_R_sp) || (IndexReg == STARS_x86_R_sp)) {
		// ESP-relative constant offset
		// If DEFs are not yet normalized, the displacement is relative to the current
		//  ESP; add sp_delta to rebase it to the ESP value at function entry.
		if (!Instr->AreDefsNormalized()) {
			SignedOffset += sp_delta; // base offsets from entry ESP value
		}
		SignedOffset -= BaseValue; // convert to StackFrameMap index
		offset = (STARS_ea_t) SignedOffset; // write back to outgoing argument
		// Get size of data written
		DataSize = TempOp->GetByteWidth();
		FP = false;
		Indexed = ((BaseReg != STARS_x86_R_none) && (IndexReg != STARS_x86_R_none)); // two regs used
		// Loads that zero-extend or sign-extend reveal the signedness of the stack location.
		unsigned short opcode = Instr->GetIDAOpcode();
		Unsigned = (opcode == STARS_NN_movzx);
		Signed = (opcode == STARS_NN_movsx);
		// A negative index is only an error for a direct (non-indexed) access once the final
		//  MinStackAccessOffset base is in use, unless the frame extends past the stack top.
		if ((0 > SignedOffset) && (!Indexed) && (BaseValue == this->MinStackAccessOffset) && (!this->DoesStackFrameExtendPastStackTop())) {
			// Consider asserting here.
			SMP_msg("ERROR: Negative offset in MDGetStackOffsetAndSize at %llx for inst dump: \n", 
				(unsigned long long) Instr->GetAddr());
			Instr->Dump();
		}
		return true;
	}
	else if (this->UseFP && ((BaseReg == MD_FRAME_POINTER_REG) || (IndexReg == MD_FRAME_POINTER_REG))) {
		// Frame-pointer-relative access: rebase from the FP to the entry ESP value.
		SignedOffset += this->GetFramePtrStackDelta(); // base offsets from entry ESP value, e.g. ebp-4 == esp-8
		SignedOffset -= BaseValue; // convert to StackFrameMap index
		offset = (STARS_ea_t) SignedOffset;
		DataSize = TempOp->GetByteWidth();
		FP = true;
		Indexed = ((BaseReg != STARS_x86_R_none) && (IndexReg != STARS_x86_R_none)); // two regs used
#if 0
		assert(Indexed || (!this->StackPtrAnalysisSucceeded()) || !this->HasSTARSStackPtrAnalysisCompleted()); // Else we should never get here with unnormalized stack operands
#else
		// After successful stack-pointer analysis, non-indexed FP-relative operands should
		//  already have been normalized to SP form; warn (rather than assert) if not.
		if (!(Indexed || (!this->StackPtrAnalysisSucceeded()) || !this->HasSTARSStackPtrAnalysisCompleted())) {
			SMP_msg("WARNING: Unnormalized FP-relative stack offset at %llx after stack analysis succeeded.\n",
				(unsigned long long) Instr->GetAddr());
		}
#endif
		unsigned short opcode = Instr->GetIDAOpcode();
		Unsigned = (opcode == STARS_NN_movzx);
		Signed = (opcode == STARS_NN_movsx);
		if ((0 > SignedOffset) && (!Indexed) && (BaseValue == this->MinStackAccessOffset)) {
			// Consider asserting here.
			SMP_msg("ERROR: Negative offset %d in MDGetStackOffsetAndSize: frregs: %d MinStackDelta: %ld Inst dump: \n",
				SignedOffset, this->CalleeSavedRegsSize, (long) this->MinStackDelta);
			Instr->Dump();
		}
		return true;
	}
	else {
		// Neither SP-relative nor FP-relative: not a stack access.
		return false;
	}
} // end of SMPFunction::MDGetStackOffsetAndSize()
		
// Fetch the fine-grained stack table entry for stack operand TempOp of the inst at InstAddr.
// Returns false if TempOp is not a stack access; otherwise fills FGEntry and returns true.
bool SMPFunction::MDGetFGStackLocInfo(STARS_ea_t InstAddr, const STARSOpndTypePtr &TempOp, struct FineGrainedInfo &FGEntry) {
	int BaseReg;
	int IndexReg;
	uint16_t ScaleFactor;
	STARS_ea_t offset;

	assert((TempOp->IsMemNoDisplacementOp()) || (TempOp->IsMemDisplacementOp()));
	MDExtractAddressFields(TempOp, BaseReg, IndexReg, ScaleFactor, offset);

	int TableIndex = (int) offset;

	if (TempOp->IsMemNoDisplacementOp()) {
		assert(TableIndex == 0);  // implicit zero, as in [esp] ==> [esp+0]
	}
	if ((BaseReg == MD_STACK_POINTER_REG) || (IndexReg == MD_STACK_POINTER_REG)) {
		// ESP-relative constant offset; rebase to a StackFrameMap index.
		TableIndex -= this->MinStackAccessOffset;
	}
	else if (this->UseFP && ((BaseReg == MD_FRAME_POINTER_REG) || (IndexReg == MD_FRAME_POINTER_REG))) {
		assert(false); // should never get here with unnormalized stack operand
		TableIndex += this->GetFramePtrStackDelta(); // base offsets from entry ESP value
		TableIndex -= this->MinStackAccessOffset; // convert to StackFrameMap index
	}
	else {
		return false; // not a stack access at all
	}
	// Good offset obtained; indices at or above the negative table's size belong to
	//  the positive-offset table, the rest to the negative-offset table.
	int NegTableSize = (int) this->NegativeOffsetFineGrainedStackTable.size();
	if (TableIndex >= NegTableSize) {
		TableIndex -= NegTableSize;
		assert(TableIndex < ((int) this->PositiveOffsetFineGrainedStackTable.size()));
		FGEntry = this->PositiveOffsetFineGrainedStackTable.at((std::size_t) TableIndex);
	}
	else {
		assert(0 <= TableIndex);
		FGEntry = this->NegativeOffsetFineGrainedStackTable.at((std::size_t) TableIndex);
	}
	return true;
} // end of SMPFunction::MDGetFGStackLocInfo()

// Merge NewFG into the fine-grained stack entry for stack operand TempOp of the inst at
//  InstAddr. Returns true if the operand maps to an updatable stack table entry.
bool SMPFunction::MDUpdateFGStackLocInfo(STARS_ea_t InstAddr, const STARSOpndTypePtr &TempOp, struct FineGrainedInfo NewFG) {
	int BaseReg;
	int IndexReg;
	uint16_t ScaleFactor;
	STARS_ea_t offset;

	assert((TempOp->IsMemNoDisplacementOp()) || (TempOp->IsMemDisplacementOp()));
	MDExtractAddressFields(TempOp, BaseReg, IndexReg, ScaleFactor, offset);

	int TableIndex = (int) offset;

	if (TempOp->IsMemNoDisplacementOp()) {
		assert(TableIndex == 0);  // implicit zero, as in [esp] ==> [esp+0]
	}
	if ((BaseReg == MD_STACK_POINTER_REG) || (IndexReg == MD_STACK_POINTER_REG)) {
		// ESP-relative constant offset; rebase to a StackFrameMap index.
		TableIndex -= this->MinStackAccessOffset;
	}
	else if (this->UseFP && ((BaseReg == MD_FRAME_POINTER_REG) || (IndexReg == MD_FRAME_POINTER_REG))) {
		assert(false); // should never get here with unnormalized stack operands
		TableIndex += this->GetFramePtrStackDelta(); // base offsets from entry ESP value
		TableIndex -= this->MinStackAccessOffset; // convert to StackFrameMap index
	}
	else {
		return false; // not a stack access
	}
	// Good offset obtained; dispatch to the proper table.
	int NegTableSize = (int) this->NegativeOffsetFineGrainedStackTable.size();
	if (TableIndex >= NegTableSize) { // in Positive offset table
		TableIndex -= NegTableSize;
		assert(TableIndex < ((int) this->PositiveOffsetFineGrainedStackTable.size()));
		// OR in any new sign/size bits; a no-op write when nothing changed.
		struct FineGrainedInfo &PosEntry = this->PositiveOffsetFineGrainedStackTable.at((std::size_t) TableIndex);
		PosEntry.SignMiscInfo |= NewFG.SignMiscInfo;
		PosEntry.SizeInfo |= NewFG.SizeInfo;
	}
	else if (this->OutgoingArgsComputed && (((std::size_t) TableIndex) < this->OutgoingArgsSize)) {
		// We don't want to update the outgoing args region, as it will not be consistent
		//  over multiple function calls. NOTE: We could fine tune this by seeing if we
		//  call mutliple target functions or not; if only one, then outgoing args region
		//  would be consistent in the absence of varargs targets.
		return false;
	}
	else { // local var access in NegativeOffsetFineGrainedStackTable
		struct FineGrainedInfo &NegEntry = this->NegativeOffsetFineGrainedStackTable.at((std::size_t) TableIndex);
		NegEntry.SignMiscInfo |= NewFG.SignMiscInfo;
		NegEntry.SizeInfo |= NewFG.SizeInfo;
	}
	return true;
} // end of SMPFunction::MDUpdateFGStackLocInfo()

// Look up RegHashValue in GlobalDefAddrBySSA; return its DEF addr, or STARS_BADADDR if absent.
STARS_ea_t SMPFunction::GetGlobalDefAddrForRegHash(int RegHashValue) {
	map<int, STARS_ea_t>::const_iterator FindIter = this->GlobalDefAddrBySSA.find(RegHashValue);
	if (FindIter == this->GlobalDefAddrBySSA.end()) {
		return STARS_BADADDR; // not present in the map
	}
	return (STARS_ea_t) FindIter->second;
} // end of SMPFunction::GetGlobalDefAddrForRegHash()

// Retrieve the DEF addr for DefOp/SSANum: registers via the GlobalDefAddrBySSA map,
//  direct stack accesses via the GlobalStackDefAddrBySSA map, and other (indirect)
//  memory DEFs via a linear search of blocks containing indirect memory writes.
// Returns STARS_BADADDR if the DEF is not found.
STARS_ea_t SMPFunction::GetGlobalDefAddr(const STARSOpndTypePtr &DefOp, int SSANum) {
	STARS_ea_t DefAddr = STARS_BADADDR; // STARS_BADADDR means we did not find it
	bool RegDef = (DefOp->IsRegOp());

	if (RegDef) {
		// Register DEFs are tracked by hashed name+SSA in GlobalDefAddrBySSA.
		int HashedName = HashGlobalNameAndSSA(DefOp, SSANum);
		DefAddr = this->GetGlobalDefAddrForRegHash(HashedName);
	}
	else if (MDIsDirectStackAccessOpnd(DefOp, this->UsesFramePointer())) {
		// Direct stack DEFs are tracked by a 64-bit hash in GlobalStackDefAddrBySSA.
		int64_t HashedName = HashGlobalStackNameAndSSA(DefOp, SSANum, this->UsesFramePointer());
		map<int64_t, STARS_ea_t>::iterator MapResult = this->GlobalStackDefAddrBySSA.find(HashedName);
		if (MapResult != this->GlobalStackDefAddrBySSA.end()) { // Found it.
			DefAddr = (STARS_ea_t) MapResult->second;
		}
	}
	else if (DefOp->IsMemOp()) {
		// Until we get indirect memory operands into the GlobalDefAddrBySSA map,
		//  do a linear search. Search only blocks with indirect memory writes.
		set<DefOrUse, LessDefUse>::iterator DefIter;
		SMPInstr *CurrInst = this->Instrs.front();
		if (CurrInst->IsMarkerInst()) {
			if (0 == SSANum) { // Live-in-to-func stack locations get DEF at marker inst.
				DefIter = CurrInst->FindDef(DefOp);
				if (DefIter != CurrInst->GetLastDef()) {
					// Found it. Must be SSA 0.
					assert(0 == DefIter->GetSSANum());
					return CurrInst->GetAddr();
				}
			}
		}

		for (size_t BlockNum = 0; BlockNum < this->GetNumBlocks(); ++BlockNum) {
			SMPBasicBlock *CurrBlock = this->GetBlockByNum(BlockNum);
			if (CurrBlock->HasIndirectMemWrite()) {
				vector<SMPInstr *>::const_iterator InstVecIter;
				for (InstVecIter = CurrBlock->GetFirstConstInst(); InstVecIter != CurrBlock->GetLastConstInst(); ++InstVecIter) {
					// BUG FIX: previously dereferenced the stale list iterator InstIter here,
					//  so every iteration examined the same instruction instead of walking the block.
					CurrInst = (*InstVecIter);
					if (CurrInst->HasDestMemoryOperand()) {
						STARSOpndTypePtr MemDefOp = CurrInst->GetMemDef();
						if (IsEqOp(DefOp, MemDefOp)) {
							DefIter = CurrInst->FindDef(MemDefOp);
							assert(DefIter != CurrInst->GetLastDef());
							if (DefIter->GetSSANum() == SSANum) { // found it
								DefAddr = CurrInst->GetAddr();
								break;
							}
						}
					}
					else if (CurrInst->MDIsPushInstr()) { // merge if and else branches here ???
						DefIter = CurrInst->FindDef(DefOp);
						if (DefIter != CurrInst->GetLastDef()) {
							if (DefIter->GetSSANum() == SSANum) { // found it
								DefAddr = CurrInst->GetAddr();
								break;
							}
						}
					}
				} // end for all insts in current block
			}
			if (STARS_BADADDR != DefAddr) {
				break; // stop scanning remaining blocks once the DEF is found
			}
		} // end for all blocks
	}
	return DefAddr;
} // end of SMPFunction::GetGlobalDefAddr()

// Locate the DEF of global DefOp+SSANum and return an inst iterator within its DEF block.
vector<SMPInstr *>::iterator SMPFunction::GetBlockInstIterBySSA(const STARSOpndTypePtr &DefOp, int SSANum) {
	assert(this->IsGlobalName(DefOp));
	STARS_ea_t DefAddr = this->GetGlobalDefAddr(DefOp, SSANum);
	assert(STARS_BADADDR != DefAddr);
	if (!STARS_IsBlockNumPseudoID(DefAddr)) {
		// Ordinary DEF: DefAddr is a real instruction address inside some block.
		SMPBasicBlock *HomeBlock = this->GetBlockFromInstAddr(DefAddr);
		return HomeBlock->GetInstIterFromAddr(DefAddr);
	}
	// Phi DEF: the pseudo-address encodes the block number; use the block's first inst.
	SMPBasicBlock *PhiBlock = this->GetBlockByNum(STARS_GetBlockNumFromPseudoID(DefAddr));
	return PhiBlock->GetFirstInst();
} // end of SMPFunction::GetBlockInstIterBySSA()

int SMPFunction::GetBlockNumForPhiDef(const STARSOpndTypePtr &DefOp, int SSANum) {
	std::size_t BlockIndex;
	for (BlockIndex = 0; BlockIndex < this->RPOBlocks.size(); ++BlockIndex) {
		SMPBasicBlock *CurrBlock = this->RPOBlocks.at(BlockIndex);
		set<SMPPhiFunction, LessPhi>::iterator PhiIter = CurrBlock->FindPhi(DefOp);
		if (PhiIter != CurrBlock->GetLastPhi()) {
			if (PhiIter->GetDefSSANum() == SSANum) {
				return CurrBlock->GetNumber();
			}
		}
	}
	return (int) STARS_BADADDR;
} // end of SMPFunction::GetBlockNumForPhiDef()

// Retrieve block pointer for InstAddr from InstBlockMap; assert if failure
SMPBasicBlock *SMPFunction::GetBlockFromInstAddr(STARS_ea_t InstAddr) {
	map<STARS_ea_t, SMPBasicBlock *>::iterator FoundIter = this->InstBlockMap.find(InstAddr);
	// Every instruction in this function must already be mapped to its block.
	assert(FoundIter != this->InstBlockMap.end());
	return FoundIter->second;
}

// return SMP_BLOCKNUM_UNINIT if not in InstBlockMap, block # otherwise
int SMPFunction::GetBlockNumFromInstAddr(STARS_ea_t InstAddr) {
	map<STARS_ea_t, SMPBasicBlock *>::iterator FoundIter = this->InstBlockMap.find(InstAddr);
	if (FoundIter == this->InstBlockMap.end()) {
		return SMP_BLOCKNUM_UNINIT; // InstAddr is not in this function
	}
	return FoundIter->second->GetNumber();
}

// Find FindBlock in Blocks, return iterator (Blocks.end() if not present).
list<SMPBasicBlock *>::iterator SMPFunction::GetBlockIter(SMPBasicBlock *FindBlock) {
	// Pointer-equality linear search; std::find expresses the original
	//  hand-rolled loop directly (<algorithm> is already included by this file).
	return std::find(this->Blocks.begin(), this->Blocks.end(), FindBlock);
}

// Retrieve inst pointer for InstAddr; assert if failure on block find.
SMPInstr *SMPFunction::GetInstFromAddr(STARS_ea_t InstAddr) {
	// GetBlockFromInstAddr() asserts if InstAddr is not in this function.
	SMPBasicBlock *HomeBlock = this->GetBlockFromInstAddr(InstAddr);
	return HomeBlock->FindInstr(InstAddr);
}

// Return the loop number whose loop-test block is BlockNum, or -1 if BlockNum
//  is not the test block of any loop in this function.
int SMPFunction::GetLoopNumFromTestBlockNum(int BlockNum) const {
	for (std::size_t CandidateLoop = 0; CandidateLoop < this->LoopCount; ++CandidateLoop) {
		if (this->LoopTestBlocksByLoopNum.at(CandidateLoop) == BlockNum) {
			return (int) CandidateLoop; // found the loop tested by BlockNum
		}
	}
	return -1; // BlockNum tests no loop
}

// find innermost loop # for BlockNum
// The innermost containing loop is the containing loop with the fewest member blocks.
int SMPFunction::GetInnermostLoopNum(const int BlockNum) const {
	int InnerLoopNum = -1;
	if ((0 <= BlockNum) && (0 < this->LoopCount) && this->IsBlockInAnyLoop(BlockNum)) {
		size_t SmallestSize = 100000; // sentinel larger than any real loop's block count
		size_t NumLoopBits = this->FuncLoopsByBlock[(size_t) BlockNum].GetNumBits();
		for (size_t CandidateLoop = 0; CandidateLoop < NumLoopBits; ++CandidateLoop) {
			if (!this->FuncLoopsByBlock[(size_t) BlockNum].GetBit(CandidateLoop))
				continue; // BlockNum is not a member of this loop
			size_t LoopSize = this->FuncBlocksByLoop[CandidateLoop].CountSetBits();
			if (LoopSize < SmallestSize) {
				SmallestSize = LoopSize;
				InnerLoopNum = (int) CandidateLoop;
			}
		}
	}
	return InnerLoopNum;
} // end of SMPFunction::GetInnermostLoopNum()

// find outermost loop # for BlockNum
// Counterpart of GetInnermostLoopNum(): the outermost containing loop is the
//  containing loop with the MOST member blocks.
int SMPFunction::GetOutermostLoopNum(const int BlockNum) const {
	int LoopNum = -1;
	size_t MaxBlockCount = 0;
	if ((BlockNum >= 0) && (this->LoopCount > 0) && this->IsBlockInAnyLoop(BlockNum)) {
		// Find loop containing this BlockNum with the greatest number of blocks.
		for (size_t LoopIndex = 0; LoopIndex < this->FuncLoopsByBlock[(size_t) BlockNum].GetNumBits(); ++LoopIndex) {
			if (this->FuncLoopsByBlock[(size_t) BlockNum].GetBit(LoopIndex)) { // BlockNum is in loop LoopIndex
				size_t CurrBlockCount = this->FuncBlocksByLoop[LoopIndex].CountSetBits();
				if (CurrBlockCount > MaxBlockCount) {
					MaxBlockCount = CurrBlockCount;
					LoopNum = (int) LoopIndex;
				}
			}
		}
	}
	return LoopNum;
}

// Given block # and PhiDef STARSOpndType and SSANum, return the Phi iterator or assert.
// NOTE(review): SSANum is currently unused; the lookup is by DefOp only. Callers
//  presumably obtain BlockNumber from GetBlockNumForPhiDef(DefOp, SSANum), which
//  makes the DefOp lookup unique within that block -- confirm before calling with
//  a block number obtained any other way.
set<SMPPhiFunction, LessPhi>::iterator SMPFunction::GetPhiIterForPhiDef(std::size_t BlockNumber, const STARSOpndTypePtr &DefOp, int SSANum) {
	SMPBasicBlock *DefBlock = this->RPOBlocks.at(BlockNumber);
	set<SMPPhiFunction, LessPhi>::iterator PhiIter = DefBlock->FindPhi(DefOp);
	assert(PhiIter != DefBlock->GetLastPhi()); // DefOp must have a phi function in this block
	return PhiIter;
}

// push_back new entries on switch data structures
// Records TableInfo for the indirect jump at IndirJumpAddr, mapping the jump
//  address to its index in SwitchInfoArray. Always returns true; malformed
//  input trips an assert instead.
bool SMPFunction::AddSwitchTableInfo(STARS_ea_t IndirJumpAddr, struct SwitchTableInfo TableInfo) {
	size_t NewIndex = this->GetSwitchJumpMapSize(); // first index is zero, etc.
	// The jump map and the info array must always grow in lock step.
	assert(NewIndex == this->GetSwitchInfoArraySize());
	pair<map<STARS_ea_t, size_t>::iterator, bool> InsertResult
		= this->SwitchJumpMap.insert(pair<STARS_ea_t, size_t>(IndirJumpAddr, NewIndex));
	assert(InsertResult.second); // should never insert twice for the same IndirJumpAddr
	// Sanity-check each field of TableInfo before recording it.
	assert(SMP_BLOCKNUM_UNINIT != TableInfo.FollowNodeNum);
	assert(!TableInfo.CaseBlockNums.empty());
	assert(TableInfo.CaseBlockNums.size() == TableInfo.IndexValue.size());
	this->SwitchInfoArray.push_back(TableInfo);
	return true;
} // end of SMPFunction::AddSwitchTableInfo()

// Is DestOp within the outgoing args area? Assume it must be an ESP-relative
//  DEF operand in order to be a write to the outgoing args area.
// NOTE: DestOp should be already normalized to the entry stack delta.
bool SMPFunction::IsInOutgoingArgsRegion(const STARSOpndTypePtr &DestOp) {
	bool OutArgWrite = false;
	int BaseReg, IndexReg;
	uint16_t ScaleFactor;
	STARS_ea_t offset;

	// A leaf function makes no calls, so it has no outgoing args area.
	if (this->IsLeaf())
		return false;

	MDExtractAddressFields(DestOp, BaseReg, IndexReg, ScaleFactor, offset);
	// Must be stack-pointer relative to qualify as an outgoing arg write.
	if ((BaseReg != STARS_x86_R_sp) && (IndexReg != STARS_x86_R_sp))
		return false;
	// Reject indexed/scaled accesses; only direct ESP+offset writes are classified.
	if (((BaseReg == STARS_x86_R_sp) && (IndexReg != STARS_x86_R_none))
		|| ((IndexReg == STARS_x86_R_sp) && (BaseReg != STARS_x86_R_none))
		|| (0 < ScaleFactor)) {

#if 0
		SMP_msg("WARNING: WritesToOutgoingArgs called with indexed write.");
		PrintOperand(DestOp);
#endif
		return false;
	}

	if (!this->OutgoingArgsComputed) {
		OutArgWrite = true; // be conservative
	}
	else {
		int SignedOffset = (int) offset;
		SignedOffset -= this->MinStackDelta; // convert to zero-based from bottom of stack frame
		// Bytes between the lowest stack point and the frame allocation point.
		int BelowFrameBytes = this->AllocPointDelta - this->MinStackDelta;
		assert(BelowFrameBytes >= 0);
		int OutArgsLimit = (BelowFrameBytes + this->OutgoingArgsSize);
#if 0
		OutArgWrite = (SignedOffset >= BelowFrameBytes) && (SignedOffset < OutArgsLimit);
#else
		// Deliberately includes the below-frame-allocation region (offsets in
		//  [0, BelowFrameBytes)) as well as the outgoing args proper.
		OutArgWrite = (SignedOffset >= 0) && (SignedOffset < OutArgsLimit);
#endif
	}
	return OutArgWrite;
} // end of SMPFunction::IsInOutgoingArgsRegion()

// Does SourceOp from SourceInst fall within incoming args area?
// True iff SourceOp is a stack access, SourceInst's DEFs are normalized to the
//  entry stack delta, the access is stack-pointer relative, and the offset is at
//  or above the ISA byte width (i.e. above the saved return address slot).
bool SMPFunction::IsInIncomingArgsRegion(SMPInstr *SourceInst, const STARSOpndTypePtr &SourceOp) const {
	// Guard: must be a stack operand with normalized (entry-delta-relative) offsets.
	if (!(MDIsStackAccessOpnd(SourceOp, false) && SourceInst->AreDefsNormalized())) {
		return false;
	}

	int BaseReg, IndexReg;
	uint16_t ScaleFactor;
	STARS_ea_t offset;
	MDExtractAddressFields(SourceOp, BaseReg, IndexReg, ScaleFactor, offset);

	// Offset will be greater than the ISA byte width if SourceOp is above the return address.
	int SignedOffset = (int) offset;
	bool StackPtrBased = (BaseReg == STARS_x86_R_sp) || (IndexReg == STARS_x86_R_sp);
	return (StackPtrBased && (SignedOffset >= ((int) global_STARS_program->GetSTARS_ISA_Bytewidth())));
} // end of SMPFunction::IsInIncomingArgsRegion()

bool SMPFunction::IsLiveIn(const STARSOpndTypePtr &CurrOp) const {
	return (this->Blocks.front()->IsLiveIn(CurrOp));
}

// Is DestOp a direct memory access above the local vars frame?
// OpNormalized: true iff DestOp's offset is relative to the entry stack delta.
// InstAddr: address of the instruction containing DestOp, used for diagnostics.
// Side effect: a normalized ESP+0 write is the saved return address; it logs a
//  DANGER message and marks the function unsafe for fast returns.
bool SMPFunction::WritesAboveLocalFrame(const STARSOpndTypePtr &DestOp, bool OpNormalized, STARS_ea_t InstAddr) {
	bool InArgWrite = false;
	int BaseReg, IndexReg;
	uint16_t ScaleFactor;
	STARS_ea_t offset;
	long SignedOffset;

	MDExtractAddressFields(DestOp, BaseReg, IndexReg, ScaleFactor, offset);
	SignedOffset = (long) offset;
	bool ESPrelative = (BaseReg == MD_STACK_POINTER_REG) || (IndexReg == MD_STACK_POINTER_REG);
	bool EBPrelative = this->UseFP && ((BaseReg == MD_FRAME_POINTER_REG) || (IndexReg == MD_FRAME_POINTER_REG));
	assert(!EBPrelative || !OpNormalized); // stack operands should be normalized by now
	if (!(ESPrelative || EBPrelative))
		return false; // not a direct stack access at all
	// Indexed/scaled accesses have no single constant offset; cannot classify.
	if (((IndexReg != STARS_x86_R_none) && (BaseReg != STARS_x86_R_none))
		|| (0 < ScaleFactor)) {

		SMP_msg("WARNING: WritesAboveLocalFrame called with indexed write.");
		PrintOperand(DestOp);
		return false;
	}

	// The next statement omits a complication: The possibility that OpNormalized is false,
	//  and an ESPRelative access is above the stack frame. For the purposes of determining
	//  whether a function is safe, this is irrelevant, because !OpNormalized would indicate
	//  that AnalyzedSP is false, which will make the function unsafe anyway. Future uses for
	//  other purposes need to fix this.
	InArgWrite = (ESPrelative && OpNormalized && (SignedOffset >= 0))
		|| (EBPrelative && (SignedOffset > 0));

	if (InArgWrite && OpNormalized && (0 == SignedOffset)) {
		// Normalized ESP offset zero is the saved return address slot.
		SMP_msg("DANGER: Write to saved return address detected at %llx in function that begins at %llx\n",
			(unsigned long long) InstAddr, (unsigned long long) this->GetFirstFuncAddr());
		this->SetUnsafeForFastReturns(true, RETURN_ADDRESS_WRITE);
	}

	return InArgWrite;
}// end of SMPFunction::WritesAboveLocalFrame()

// Is StackOp direct stack access to caller's frame, incoming args on stack, or return address?
// OpNormalized: true iff StackOp's offset is relative to the entry stack delta.
// WriteAccess: selects the DANGER (write) vs INFO (read) diagnostic for a
//  return-address access; both mark the function unsafe for fast returns.
// NOTE(review): near-duplicate of WritesAboveLocalFrame() plus read handling;
//  candidate for consolidation.
bool SMPFunction::AccessAboveLocalFrame(const STARSOpndTypePtr &StackOp, bool OpNormalized, STARS_ea_t InstAddr, bool WriteAccess){
	bool InArgAccess = false;
	int BaseReg, IndexReg;
	uint16_t ScaleFactor;
	STARS_ea_t offset;
	long SignedOffset;

	MDExtractAddressFields(StackOp, BaseReg, IndexReg, ScaleFactor, offset);
	SignedOffset = (long) offset;
	bool ESPrelative = (BaseReg == MD_STACK_POINTER_REG) || (IndexReg == MD_STACK_POINTER_REG);
	bool EBPrelative = this->UseFP && ((BaseReg == MD_FRAME_POINTER_REG) || (IndexReg == MD_FRAME_POINTER_REG));
	assert(!EBPrelative || !OpNormalized); // stack operands should be normalized by now
	if (!(ESPrelative || EBPrelative))
		return false; // not a direct stack access at all
	// Indexed/scaled accesses have no single constant offset; cannot classify.
	if (((IndexReg != STARS_x86_R_none) && (BaseReg != STARS_x86_R_none))
		|| (0 < ScaleFactor)) {

		SMP_msg("WARNING: AccessAboveLocalFrame called with indexed operand.");
		PrintOperand(StackOp);
		return false;
	}

	// The next statement omits a complication: The possibility that OpNormalized is false,
	//  and an ESPRelative access is above the stack frame. For the purposes of determining
	//  whether a function is safe, this is irrelevant, because !OpNormalized would indicate
	//  that AnalyzedSP is false, which will make the function unsafe anyway. Future uses for
	//  other purposes need to fix this.
	InArgAccess = (ESPrelative && OpNormalized && (SignedOffset >= 0))
		|| (EBPrelative && (SignedOffset > 0));

	if (InArgAccess && OpNormalized && (0 == SignedOffset)) {
		// Normalized ESP offset zero is the saved return address slot.
		if (WriteAccess) {
			SMP_msg("DANGER: Write to saved return address detected at %llx in function that begins at %llx\n",
				(unsigned long long) InstAddr, (unsigned long long) this->GetFirstFuncAddr());
			this->SetUnsafeForFastReturns(true, RETURN_ADDRESS_WRITE);
		}
		else {
			SMP_msg("INFO: Read of saved return address detected at %llx in function that begins at %llx\n",
				(unsigned long long) InstAddr, (unsigned long long) this->GetFirstFuncAddr());
			this->SetUnsafeForFastReturns(true, RETURN_ADDRESS_READ);
		}
	}

	return InArgAccess;
} // end of SMPFunction::AccessAboveLocalFrame()

// Get stack access type from stack frame map
// Classifies the direct, normalized stack access StackOp at InstAddr by looking
//  up its offset in PositiveOffsetStackFrameMap (offset >= 0) or
//  NegativeOffsetStackFrameMap (offset < 0). Returns STARS_STACK_UNKNOWN when
//  classification is impossible (empty map, non-stack operand, indexed access,
//  unnormalized operand, or offset outside the maps).
// WriteAccess is used only to emit DANGER diagnostics for suspicious writes.
StackAccessType SMPFunction::GetStackAccessType(const STARSOpndTypePtr &StackOp, bool OpNormalized, STARS_ea_t InstAddr, bool WriteAccess) {
	StackAccessType AccessVal = STARS_STACK_UNKNOWN;
	if (this->NegativeOffsetStackFrameMap.empty()) {
		// BUG FIX: diagnostic previously named IsAccessToCalleeSavedReg(), which is
		//  only one of this method's callers; report this method's own name.
		SMP_msg("WARNING: GetStackAccessType() called with empty stack frame map at %llx\n",
			(uint64_t) InstAddr);
		return AccessVal;
	}

	int BaseReg, IndexReg;
	uint16_t ScaleFactor;
	STARS_ea_t offset;
	long SignedOffset;

	MDExtractAddressFields(StackOp, BaseReg, IndexReg, ScaleFactor, offset);
	SignedOffset = (long) offset;
	bool ESPrelative = (BaseReg == MD_STACK_POINTER_REG) || (IndexReg == MD_STACK_POINTER_REG);
	bool EBPrelative = this->UseFP && ((BaseReg == MD_FRAME_POINTER_REG) || (IndexReg == MD_FRAME_POINTER_REG));
	assert(!EBPrelative || !OpNormalized); // stack operands should be normalized by now
	if (!(ESPrelative || EBPrelative))
		return AccessVal; // not a direct stack access

	// Indexed/scaled accesses have no single constant offset to classify.
	if (((IndexReg != STARS_x86_R_none) && (BaseReg != STARS_x86_R_none)) || (0 < ScaleFactor)) {
		SMP_msg("WARNING: GetStackAccessType() called at %llx with indexed operand.", (uint64_t) InstAddr);
		PrintOperand(StackOp);
		return AccessVal;
	}

	// TODO: Handle unnormalized operands. Not sure why they should exist at this point.
	if (!OpNormalized)
		return AccessVal;

	if (SignedOffset >= 0) {
		// Zero/positive offsets: return address, incoming args, etc.
		size_t TableIndex = (size_t) SignedOffset;
		if (TableIndex < this->PositiveOffsetStackFrameMap.size())
			AccessVal = this->PositiveOffsetStackFrameMap[TableIndex].EntryType;
		if (WriteAccess) {
			if (STARS_STACK_RETURN_ADDRESS == AccessVal)
				SMP_msg("DANGER: WriteAccess to return address at %llx\n", (uint64_t) InstAddr);
			else if (STARS_STACK_INARG == AccessVal)
				SMP_msg("DANGER: WriteAccess to incoming arg at %llx\n", (uint64_t) InstAddr);
		}
	}
	else {
		// Negative offsets index the local-frame map from its end (offset -1 == last entry).
		int TableIndex = ((int) this->NegativeOffsetStackFrameMap.size()) + SignedOffset;
		assert(0 <= TableIndex);
		if (((size_t) TableIndex) < this->NegativeOffsetStackFrameMap.size()) {
			AccessVal = this->NegativeOffsetStackFrameMap[TableIndex].EntryType;
			if (WriteAccess) {
				if (STARS_STACK_CALLEE_SAVED_REG == AccessVal)
					SMP_msg("DANGER: WriteAccess to callee-saved reg at %llx\n", (uint64_t) InstAddr);
			}
		}
	}
	return AccessVal;
} // end of SMPFunction::GetStackAccessType()

// Is StackOp direct stack access to a callee-saved reg?
bool SMPFunction::IsAccessToCalleeSavedReg(const STARSOpndTypePtr &StackOp, bool OpNormalized, STARS_ea_t InstAddr, bool WriteAccess) {
	StackAccessType AccessVal = this->GetStackAccessType(StackOp, OpNormalized, InstAddr, WriteAccess);
	bool SavedRegAccess = (STARS_STACK_CALLEE_SAVED_REG == AccessVal);

	return SavedRegAccess;
} // end of SMPFunction::IsAccessToCalleeSavedReg()


// Is DestOp an indexed write above the local vars frame?
// True iff DestOp is stack-pointer or (when UseFP) frame-pointer relative with a
//  positive constant offset.
bool SMPFunction::IndexedWritesAboveLocalFrame(const STARSOpndTypePtr &DestOp) {
	int BaseReg, IndexReg;
	uint16_t ScaleFactor;
	STARS_ea_t offset;

	MDExtractAddressFields(DestOp, BaseReg, IndexReg, ScaleFactor, offset);
	bool StackPtrRelative = (MD_STACK_POINTER_REG == BaseReg) || (MD_STACK_POINTER_REG == IndexReg);
	bool FramePtrRelative = this->UseFP
		&& ((MD_FRAME_POINTER_REG == BaseReg) || (MD_FRAME_POINTER_REG == IndexReg));
	// stack operands should be normalized by now
	assert(!FramePtrRelative || !this->StackPtrAnalysisSucceeded() || !this->HasSTARSStackPtrAnalysisCompleted());
	if (!(StackPtrRelative || FramePtrRelative))
		return false; // not a stack access at all

	// Given a stack- or frame-pointer access, a positive offset lies above the frame.
	int SignedOffset = (int) offset;
	return (0 < SignedOffset);
} // end of SMPFunction::IndexedWritesAboveLocalFrame()

// Is CurrOp found anywhere in the StackPtrCopySet, regardless of which address and stack delta
//  values are associated with it?
bool SMPFunction::IsInStackPtrCopySet(const STARSOpndTypePtr &CurrOp) {
	if (nullptr == CurrOp)
		return false;

	bool found = false;
	// Set is composed of triples, so we have to iterate through it and compare operands.
	set<pair<STARSOpndTypePtr, pair<STARS_ea_t, STARS_sval_t> >, LessStackDeltaCopy>::iterator CopyIter;
	for (CopyIter = this->StackPtrCopySet.begin(); CopyIter != this->StackPtrCopySet.end(); ++CopyIter) {
		pair<STARSOpndTypePtr, pair<STARS_ea_t, STARS_sval_t> > CurrCopy = *CopyIter;
		STARSOpndTypePtr CopyOp = CurrCopy.first;
		if (IsEqOp(CopyOp, CurrOp)) {
			// Found it.
			found = true;
			break;
		}
		else if (CopyOp->GetOpType() > CurrOp->GetOpType()) {
			// already moved past its spot; not found
			break;
		}
	}

	return found;
} // end of SMPFunction::IsInStackPtrCopySet()

// Is definition CurrOp at DefAddr in the StackPtrCopySet, with any stack offset associated with it?
bool SMPFunction::IsDefnInStackPtrCopySet(const STARSOpndTypePtr &CurrOp, const STARS_ea_t &DefAddr) const {
	if (nullptr == CurrOp)
		return false;

	bool found = false;
	// Set is composed of triples, so we have to iterate through it and compare operands.
	set<pair<STARSOpndTypePtr, pair<STARS_ea_t, STARS_sval_t> >, LessStackDeltaCopy>::const_iterator CopyIter;
	for (CopyIter = this->StackPtrCopySet.begin(); CopyIter != this->StackPtrCopySet.end(); ++CopyIter) {
		pair<STARSOpndTypePtr, pair<STARS_ea_t, STARS_sval_t> > CurrCopy = *CopyIter;
		STARSOpndTypePtr CopyOp = CurrCopy.first;
		if (IsEqOp(CopyOp, CurrOp)) {
			STARS_ea_t CopyAddr = CurrCopy.second.first;
			if (CopyAddr == DefAddr) {
				// Found it.
				found = true;
				break;
			}
		}
		else if (CopyOp->GetOpType() > CurrOp->GetOpType()) {
			// already moved past its spot; not found
			break;
		}
	}

	return found;
} // end of SMPFunction::IsDefnInStackPtrCopySet()


// Find evidence of calls to alloca(), which appear as stack space allocations (i.e.
//  subtractions [of unknown values(?)] from the stack pointer) AFTER the local frame allocation instruction
//  for this function.
// Return true if such an allocation is found and false otherwise.
bool SMPFunction::FindAlloca(void) {
	bool FoundAlloca = false;
	list<SMPInstr *>::iterator InstIter = this->Instrs.begin();
	SMPInstr *CurrInst;
	STARS_ea_t InstAddr;
#if SMP_USE_SSA_FNOP_MARKER
	++InstIter;  // skip marker instruction
#endif
	for ( ; InstIter != this->Instrs.end(); ++InstIter) {
		CurrInst = (*InstIter);
		InstAddr = CurrInst->GetAddr();
		if (InstAddr > this->LocalVarsAllocInstr) {
			if (CurrInst->MDIsFrameAllocInstr()) {
				FoundAlloca = true;
				if (CurrInst->HasAllocaRTL()) {
					CurrInst->SetAllocaCall();
				}
			}
			else if (CurrInst->MDIsPushInstr()) {
				this->PushAfterLocalVarAlloc = true;
			}
		}
	}
	return FoundAlloca;
} // end of SMPFunction::FindAlloca()

#if 1
// Emit the annotations describing the regions of the stack frame.
// Walks NegativeOffsetStackFrameMap from the bottom of the frame (highest index)
//  upward, coalescing consecutive bytes of the same StackAccessType into regions,
//  and emits one annotation per region, all attached to Instr's address (the
//  frame allocation instruction).
// Fixes in this revision: printf-style format/argument mismatches in the
//  SMP_fprintf calls (assumes SMP_fprintf has printf semantics) -- "%zu" was
//  paired with a promoted-int uint16_t, and "esp + %d" with an unsigned long.
void SMPFunction::EmitStackFrameAnnotations(FILE *AnnotFile, SMPInstr *Instr) {
	STARS_ea_t addr = Instr->GetAddr();

#if 0
	if (0 < IncomingArgsSize) {
		SMP_fprintf(AnnotFile, "%18llx %6llu INARGS STACK esp + %ld %s \n",
			(unsigned long long) addr, (unsigned long long) IncomingArgsSize,
			(long) (this->GetLocalVarsSize() + CalleeSavedRegsSize + RetAddrSize),
			Instr->GetDisasm());
	}
#endif
	if (0 < this->RetAddrSize) {
		SMP_fprintf(AnnotFile, "%18llx %6d MEMORYHOLE STACK esp + %lu ReturnAddress \n",
			(unsigned long long) addr, RetAddrSize, (unsigned long)(this->GetLocalVarsSize() + this->CalleeSavedRegsSize));
	}
	if (this->GoodLocalVarTable && (!this->NegativeOffsetStackFrameMap.empty())) {
		// Walk through the NegativeOffsetStackFrameMap and emit annotations for the regions.
		bool LocalFrameEmitted = false; // have we emitted the LOCALFRAME annotation?
		unsigned long ParentReferentID = 0;
		size_t Limit = this->NegativeOffsetStackFrameMap.size();
		uint16_t CurrentRegionBytes = 0;
		StackAccessType CurrentRegionType = STARS_STACK_RETURN_ADDRESS; // start with previous type
		// Could have stack accesses below stack frame allocation. Don't look below the frame allocation, because
		//  we are emitting these annotations at the frame allocation instruction. !!!!****!!!! Might need to change
		//  for Windows and other stack frame conventions.
		int AllocIndex = ((int) this->NegativeOffsetStackFrameMap.size()) + (int) this->AllocPointDelta;
		// e.g. if 20 bytes in map but -16 is AllocPointDelta, then bottom four bytes are below the frame.
		assert(0 <= AllocIndex);
		for (int index = Limit - 1; index >= AllocIndex; --index) {
			StackAccessType NewType = this->NegativeOffsetStackFrameMap[index].EntryType;
			if (NewType == CurrentRegionType) {
				++CurrentRegionBytes; // still in the same region; extend it
			}
			else { // changing regions; time for annotation for previous region
				if (CurrentRegionType == STARS_STACK_CALLEE_SAVED_REG) {
					SMP_fprintf(AnnotFile, "%18llx %6u MEMORYHOLE STACK esp + %lu CalleeSavedRegs \n",
						(unsigned long long) addr, CurrentRegionBytes, (unsigned long) (index + 1 - AllocIndex));
				}
				else if (CurrentRegionType == STARS_STACK_LOCAL_FRAME) {
					ParentReferentID = global_STARS_program->GetDataReferentID();
					global_STARS_program->IncrementDataReferentID();
					STARS_asize_t TempRegionBytes = (STARS_asize_t) CurrentRegionBytes;
					STARS_asize_t TempOutArgsSize = (STARS_asize_t) this->OutgoingArgsSize;
					if (this->GetLocalVarsSize() != (TempRegionBytes + TempOutArgsSize)) {
						SMP_msg("ERROR: LocalVarsSize: %lu not sum of CurrentRegionBytes: %lu  and OutArgsSize: %lu at %llx\n",
							(unsigned long) this->GetLocalVarsSize(), (unsigned long)TempRegionBytes, (unsigned long)TempOutArgsSize, (uint64_t)addr);
					}
					// Cast the sum: its type follows OutgoingArgsSize and may be wider than "%u".
					SMP_fprintf(AnnotFile, "%18llx %6u DATAREF STACK %lu esp + 0 PARENT LocalFrame LOCALFRAME\n",
						(unsigned long long) addr, (unsigned) (CurrentRegionBytes + this->OutgoingArgsSize), (unsigned long) ParentReferentID);
					LocalFrameEmitted = true;
				}
				else if (CurrentRegionType == STARS_STACK_OUTARG) {
					if (!LocalFrameEmitted) { // entire local stack frame is outgoing args region; unusual case
						ParentReferentID = global_STARS_program->GetDataReferentID();
						global_STARS_program->IncrementDataReferentID();
						SMP_fprintf(AnnotFile, "%18llx %6u DATAREF STACK %lu esp + 0 PARENT LocalFrame LOCALFRAME\n",
							(unsigned long long) addr, CurrentRegionBytes, (unsigned long)ParentReferentID);
						LocalFrameEmitted = true;
					}
					// Was "%6zu ... esp + %d": "%u" matches the promoted uint16_t and
					//  "%lu" matches the (unsigned long) esp offset argument.
					SMP_fprintf(AnnotFile, "%18llx %6u DATAREF STACK %lu esp + %lu CHILDOF %lu OFFSET %d OutArgsRegion OUTARGS\n",
						(unsigned long long) addr, CurrentRegionBytes, (unsigned long) global_STARS_program->GetDataReferentID(),
						(unsigned long) (index + 1 - AllocIndex), ParentReferentID, 0);
					global_STARS_program->IncrementDataReferentID();
				}
				else if (CurrentRegionType == STARS_STACK_RETURN_ADDRESS) {
					; // just getting started
				}
				else { // something odd is happening
					;
				}
				CurrentRegionType = NewType;
				CurrentRegionBytes = 1;
			}
		} // end for index in range of NegativeOffsetStackAccessTable
		if (CurrentRegionBytes > 0) { // Get last region as we exited the loop.
			if (CurrentRegionType == STARS_STACK_CALLEE_SAVED_REG) {
				SMP_fprintf(AnnotFile, "%18llx %6u MEMORYHOLE STACK esp + 0 CalleeSavedRegs \n",
					(unsigned long long) addr, CurrentRegionBytes);
			}
			else if (CurrentRegionType == STARS_STACK_LOCAL_FRAME) {
				ParentReferentID = global_STARS_program->GetDataReferentID();
				global_STARS_program->IncrementDataReferentID();
				SMP_fprintf(AnnotFile, "%18llx %6u DATAREF STACK %lu esp + 0 PARENT LocalFrame LOCALFRAME\n",
					(unsigned long long) addr, CurrentRegionBytes, (unsigned long) ParentReferentID);
				LocalFrameEmitted = true;
			}
			else if (CurrentRegionType == STARS_STACK_OUTARG) {
				if (!LocalFrameEmitted) { // entire local stack frame is outgoing args region; unusual case
					ParentReferentID = global_STARS_program->GetDataReferentID();
					global_STARS_program->IncrementDataReferentID();
					SMP_fprintf(AnnotFile, "%18llx %6u DATAREF STACK %lu esp + 0 PARENT LocalFrame LOCALFRAME\n",
						(unsigned long long) addr, CurrentRegionBytes, (unsigned long)ParentReferentID);
					LocalFrameEmitted = true;
				}
				// Was "%6zu": "%u" matches the promoted uint16_t CurrentRegionBytes.
				SMP_fprintf(AnnotFile, "%18llx %6u DATAREF STACK %lu esp + 0 CHILDOF %lu OFFSET %d OutArgsRegion OUTARGS\n",
					(unsigned long long) addr, CurrentRegionBytes, (unsigned long) global_STARS_program->GetDataReferentID(), ParentReferentID, 0);
				global_STARS_program->IncrementDataReferentID();
			}
		}

	} // end if (0 < LocalVarsSize)
	return;
} // end of SMPFunction::EmitStackFrameAnnotations() 

#else
// Emit the annotations describing the regions of the stack frame.
// NOTE: This is the disabled (#else) variant, retained for reference. It emits
//  coarse annotations from the aggregate size fields and the LocalVarTable
//  rather than walking the NegativeOffsetStackFrameMap as the active variant does.
void SMPFunction::EmitStackFrameAnnotations(FILE *AnnotFile, SMPInstr *Instr) {
	STARS_ea_t addr = Instr->GetAddr();

#if 0
	if (0 < IncomingArgsSize) {
		SMP_fprintf(AnnotFile, "%18llx %6llu INARGS STACK esp + %ld %s \n",
				(unsigned long long) addr, (unsigned long long) IncomingArgsSize,
				(long) (this->GetLocalVarsSize() + CalleeSavedRegsSize + RetAddrSize),
				Instr->GetDisasm());
	}
#endif
	if (0 < this->RetAddrSize) {
		SMP_fprintf(AnnotFile, "%18llx %6d MEMORYHOLE STACK esp + %lu ReturnAddress \n",
				(unsigned long long) addr, RetAddrSize, (unsigned long) (this->GetLocalVarsSize() + this->CalleeSavedRegsSize));
	}
	if (0 < this->CalleeSavedRegsSize) {
		SMP_fprintf(AnnotFile, "%18llx %6u MEMORYHOLE STACK esp + %lu CalleeSavedRegs \n",
				(unsigned long long) addr, this->CalleeSavedRegsSize, (unsigned long) this->GetLocalVarsSize());
	}
	if ((0 < this->GetLocalVarsSize()) && this->GoodLocalVarTable) {
		unsigned long ParentReferentID = global_STARS_program->GetDataReferentID();
		global_STARS_program->IncrementDataReferentID();
		SMP_fprintf(AnnotFile, "%18llx %6lu DATAREF STACK %lu esp + %d PARENT LocalFrame LOCALFRAME\n",
				(unsigned long long) addr, (unsigned long) this->GetLocalVarsSize(), ParentReferentID, 0);
		if (this->AnalyzedSP && !this->CallsAlloca && (STARS_BADADDR != this->LocalVarsAllocInstr)) {
			// We can only fine-grain the stack frame if we were able to analyze the stack
			if (this->OutgoingArgsSize > 0) {
				SMP_fprintf(AnnotFile, "%18llx %6zu DATAREF STACK %lu esp + %d CHILDOF %lu OFFSET %d OutArgsRegion OUTARGS\n",
					(unsigned long long) addr, this->OutgoingArgsSize, global_STARS_program->GetDataReferentID(), 0, ParentReferentID, 0);
				global_STARS_program->IncrementDataReferentID();
			}
#if SMP_DEBUG_STACK_GRANULARITY
			SMP_msg("LocalVarTable of size %d for function %s\n", this->LocalVarTable.size(),
				this->GetFuncName());
#endif
			for (std::size_t i = 0; i < this->LocalVarTable.size(); ++i) {
#if SMP_DEBUG_STACK_GRANULARITY
				SMP_msg("Entry %d offset %ld size %d name %s\n", i, this->LocalVarTable[i].offset,
					this->LocalVarTable[i].size, this->LocalVarTable[i].VarName);
#endif
				// Don't emit annotations for incoming or outgoing args or anything else
				//  above or below the current local frame.
				if ((this->LocalVarTable[i].offset >= (long) this->FuncInfo->GetFrameSize())
					|| (this->LocalVarTable[i].offset < (long) this->OutgoingArgsSize))
					continue;
				SMP_fprintf(AnnotFile, "%18llx %6zu DATAREF STACK %lu esp + %ld CHILDOF %lu OFFSET %ld LOCALVAR %s \n",
					(unsigned long long) addr, this->LocalVarTable[i].size, global_STARS_program->GetDataReferentID(),
					this->LocalVarTable[i].offset, ParentReferentID,
					this->LocalVarTable[i].offset, this->LocalVarTable[i].VarName);
				global_STARS_program->IncrementDataReferentID();
			}
		} // end if (this->AnalyzedSP and not Alloca .... )
	} // end if (0 < LocalVarsSize)
	return;
} // end of SMPFunction::EmitStackFrameAnnotations() 
#endif

// Fill MemDefAddrs with inst addrs that DEF MemUseOp, tracing back from UseAddr.
// Stop if we encounter a DEF of an address reg in MemUseOp.
// AddressRegs: registers in MemUseOp's address expression; redefinition of any
//  of them invalidates further matching along this path.
// Uses the per-block Processed flags to terminate the reverse CFG walk; callers
//  are presumed to reset those flags beforehand -- TODO confirm at call sites.
void SMPFunction::FindMatchingMemDEFAddrs(STARS_ea_t UseAddr, SMPBasicBlock *CurrBlock, STARSOpndTypePtr &MemUseOp, std::list<STARS_ea_t> &MemDefAddrs, set<int> &AddressRegs) {
	set<int> CommonAddressRegs; // result of set intersection
	bool UseAddrSeen = (STARS_BADADDR == UseAddr);
	set<int> DefRegs; // regs in DEF set of CurrInst

	// Trace back from UseAddr to find MemDEFs that match MemUseOp.
	//  If UseAddr == STARS_BADADDR, that means the method has been called recursively and it should
	//  trace back from the last instruction in CurrBlock. By ensuring that an inst addr is < UseAddr,
	//  we accomplish both cases without special cases in the code.
	if (!CurrBlock->IsProcessed()) {
		// Reverse iteration: walk insts from the end of the block back toward its start.
		for (vector<SMPInstr *>::const_reverse_iterator InstIter = CurrBlock->GetRevInstCBegin(); InstIter != CurrBlock->GetRevInstCEnd(); ++InstIter) {
			SMPInstr *CurrInst = (*InstIter);
			STARS_ea_t InstAddr = CurrInst->GetAddr();
			if (InstAddr == UseAddr) {
				UseAddrSeen = true; // begin matching with the next (earlier) inst
			}
			else if (UseAddrSeen) {
				DefRegs.clear();
				if (!AddressRegs.empty()) {
					CurrInst->GetDEFRegs(DefRegs);
					(void) set_intersection(AddressRegs.begin(), AddressRegs.end(), DefRegs.begin(),
						DefRegs.end(), std::inserter(CommonAddressRegs, CommonAddressRegs.begin()));
					if (!CommonAddressRegs.empty()) {
						// Intersection is not empty. An address reg is being redefined; stop search.
						return;
					}
				}
				if (CurrInst->HasDestMemoryOperand()) {
					STARSOpndTypePtr MemDefOp = CurrInst->GetMemDef();
					assert(nullptr != MemDefOp);
					if (IsEqOpIgnoreBitwidth(MemUseOp, MemDefOp)) {
						MemDefAddrs.push_back(InstAddr); // record matching memory DEF
					}
				}
			}
		}
		// If we are beginning our first recursion, mark descendants in the CFG (not 
		//  including back edges) as processed to avoid cycling.
		if (STARS_BADADDR != UseAddr) {
			CurrBlock->DepthFirstMarkNonBackEdgeSuccessors();
		}
		else {
			CurrBlock->SetProcessed(true);
		}
		// Recurse into predecessors, passing in STARS_BADADDR as the UseAddr.
		for (list<SMPBasicBlock *>::iterator PredIter = CurrBlock->GetFirstPred(); PredIter != CurrBlock->GetLastPred(); ++PredIter) {
			SMPBasicBlock *PredBlock = (*PredIter);
			this->FindMatchingMemDEFAddrs(STARS_BADADDR, PredBlock, MemUseOp, MemDefAddrs, AddressRegs);
		}
	}
		
	return;
} // end of SMPFunction::FindMatchingMemDEFAddrs()

// Find first inst in loop, return its stack delta.
// LoopNum must index a known loop; the loop's head block must be recorded.
STARS_sval_t SMPFunction::GetLoopIncomingStackDelta(std::size_t LoopNum) const {
	assert(LoopNum < this->LoopHeadBlockNumbers.size());
	int HeadBlockNum = this->LoopHeadBlockNumbers[LoopNum];
	assert(HeadBlockNum != SMP_BLOCKNUM_UNINIT); // loop head must be known
	SMPBasicBlock *LoopHead = this->RPOBlocks[(size_t) HeadBlockNum];
	assert(LoopHead != nullptr);
	return LoopHead->GetIncomingStackDelta();
} // SMPFunction::GetLoopIncomingStackDelta()

// Mark functions with multiple entry points. These will be unsafe for fast returns and
//  are probably IDA Pro disassembly problems rather than true multi-entry functions.
void SMPFunction::DetectMultiEntryFunction(void) {
	bool IndirJumpsPresent = this->HasIndirectJumps();
	this->MultipleEntryPoints = this->GetFuncInfo()->IsMultiEntry(IndirJumpsPresent);
} // end of SMPFunction::DetectMultiEntryFunction()

// Audit and fix the IDA Pro code cross references for jumps and jump targets.
void SMPFunction::MDAuditJumpXrefs(void) {

	// Delegate to the disassembler interface; per the original note, this is
	//  probably only meaningful for the IDA Pro back end.
	GetFuncInfo()->UpdateXrefs();
} // end of SMPFunction::MDAuditJumpXrefs()

// Rebuild AllCallTargets as the union of the direct and indirect call targets.
void SMPFunction::RebuildCallTargets(void) {
	// Replace the old contents wholesale with the direct targets, then append
	//  the indirect targets; range assign/insert replaces the hand-rolled
	//  clear-and-push_back loops and preserves the same element order.
	this->AllCallTargets.assign(this->DirectCallTargets.begin(), this->DirectCallTargets.end());
	this->AllCallTargets.insert(this->AllCallTargets.end(),
		this->IndirectCallTargets.begin(), this->IndirectCallTargets.end());
	return;
} // end of SMPFunction::RebuildCallTargets()

// Main data flow analysis driver. Goes through the function and
//  fills all objects for instructions, basic blocks, and the function
//  itself: reads basic attributes from FuncInfo, builds the IR, links each
//  instruction to its basic block, classifies and records call targets,
//  then sets up CFG links and RPO numbering.
void SMPFunction::AnalyzeFunc(void) {
	// Distant code fragments that belong to this function and need processing.
	// NOTE(review): this set is declared and cleared but never populated anywhere
	//  in this method; it looks like a remnant of earlier fragment processing - confirm.
	set<STARS_ea_t> FragmentWorkList;
	STARS_ea_t InstAddr; // grab address to help in debugging, conditional breakpoints, etc.

#if SMP_DEBUG_CONTROLFLOW
	SMP_msg("Entering SMPFunction::Analyze.\n");
#endif

	// Get some basic info from the FuncInfo structure.
	this->Size = this->FuncInfo->GetFuncSize();
	this->UseFP = this->FuncInfo->FunctionUsesFP();
	this->StaticFunc = this->FuncInfo->IsStaticFunction();
	this->LibFunc = this->FuncInfo->IsLibraryFunction();
	this->AnalyzedSP = this->FuncInfo->IsStackPointerAnalyzed();

#if SMP_DEBUG_CONTROLFLOW
	SMP_msg("SMPFunction::AnalyzeFunc: got basic info.\n");
#endif

	// Detect exception throwing code by name match on the C++ runtime throw helper.
	if (0 == strcmp("__cxa_throw", this->GetFuncName())) {
		this->GetProg()->SetProgramThrowsExceptions();
	}

	// Determine if we are dealing with shared chunks.
	GetFuncInfo()->MarkSharedChunks();
#if STARS_AUDIT_JUMP_XREFS
	this->MDAuditJumpXrefs();
#endif

	// Build the intermediate representation: instructions and basic blocks.
	STARS_Function_t *MySTARSFunc = this->GetFuncInfo();
	MySTARSFunc->BuildFuncIR(this);

#if KLUDGE_VFPRINTF_FAMILY
	// Work around analysis problems in printf-family functions by pretending
	//  they have shared chunks, which triggers more conservative handling.
	if (!this->HasSharedChunks() && (0 != strstr(this->GetFuncName(), "printf"))) {
		GetFuncInfo()->SetSharedChunks(true);
		SMP_msg("INFO: Kludging function %s\n", this->GetFuncName());
	}
#endif

#if SMP_IDAPRO52_WORKAROUND
	// Same conservative kludge for one known-problematic function under IDA Pro 5.2.
	if (!this->HasSharedChunks() && (0 == strcmp(this->GetFuncName(), "error_for_asm"))) {
		GetFuncInfo()->SetSharedChunks(true);
		SMP_msg("Kludging function %s\n", this->GetFuncName());
	}
#endif

	// Now that we have all instructions and basic blocks, link each instruction
	//  to its basic block. 
	list<SMPBasicBlock *>::iterator BlockIter;
	SMPBasicBlock *CurrBlock;
	vector<SMPInstr *>::iterator InstIter;
	SMPInstr *CurrInst;
	for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
		CurrBlock = (*BlockIter);
		for (InstIter = CurrBlock->GetFirstInst(); InstIter != CurrBlock->GetLastInst(); ++InstIter) {
			CurrInst = (*InstIter);
			InstAddr = CurrInst->GetAddr();
			CurrInst->SetBlock(CurrBlock->GetThisBlock());

			// jdh hack:  redo syscall analysis as block may not be set 
			// when syscall analysis was done first time.
			if (CurrInst->GetDataFlowType() == INDIR_CALL) {
				if (CurrInst->MDIsSystemCall()) {
					this->SetHasSystemCalls();
				}
			}

			// Classify call instructions and record their targets in the
			//  direct/indirect call target sets.
			SMPitype InstDataFlowType = CurrInst->GetDataFlowType();
			if ((CALL == InstDataFlowType) || (INDIR_CALL == InstDataFlowType)) {
				CurrBlock->SetHasCallInst();
				STARS_ea_t CallTarget = CurrInst->GetCallTarget();
				if (0 == CallTarget) {
					// Some libc functions have a call to a debug function that is set to a "call 0" 
					//  when the library is not compiled with debug options. The basic block is
					//  unreachable and likely will be removed later by SCCP, but it is nice to remove
					//  it before it clutters up SSA, and just in case it has some weird code pattern
					//  before it that obscures the fact that it is unreachable.
					CurrBlock->SetUnreachableBlock(true);
					SMP_msg("INFO: Marking basic block unreachable due to call 0 instruction at %llx\n", (unsigned long long) InstAddr);
				}
				else {
					if (CALL == InstDataFlowType) {
						if (STARS_BADADDR != CallTarget) {
							bool NewDirectTarget = this->AddDirectCallTarget(CallTarget); // return value unused; kept for debugger inspection
						}
						else {
							SMP_msg("ERROR: Direct call with bad target at %llx\n", (unsigned long long) InstAddr);
						}
					}
					else { // INDIR_CALL
						this->SetHasIndirectCalls();
						if (STARS_BADADDR == CallTarget) {
							// Unresolved indirect call; system calls are excluded because
							//  they do not transfer control to a code target we must track.
							if (!CurrInst->MDIsSystemCall()) {
								this->SetHasUnresolvedIndirectCalls();
							}
						}
						else {
							bool NewIndirectTarget = this->AddIndirectCallTarget(CallTarget); // return value unused; kept for debugger inspection
						}
					}
				}
			} // end if CALL or INDIR_CALL
		} // end for each inst
		CurrBlock->Analyze();
		// Set up the map of instructions to basic blocks.
		for (vector<SMPInstr *>::iterator InstIter = CurrBlock->GetFirstInst(); InstIter != CurrBlock->GetLastInst(); ++InstIter) {
			SMPInstr *CurrInst = (*InstIter);
			InstAddr = CurrInst->GetAddr();
			pair<STARS_ea_t, SMPBasicBlock *> MapItem(InstAddr, CurrBlock);
			this->InstBlockMap.insert(MapItem);
			// Flag blocks containing rep-prefixed string opcodes (implicit loops).
			if ( CurrInst->MDIsPossibleStringLoopingOpcode() && CurrInst->MDHasAnyRepeatPrefix() ) {
				CurrBlock->SetHasLoopingStringOpcode();
			}
		}

	} // end for each block

	// Set up basic block links and reverse-post-order numbering, then run
	//  function-level classifications that depend on the completed CFG.
	this->SetLinks();
	this->RPONumberBlocks();
	this->DetectLinkerStubFunction();
	if (this->GetProg()->IsFuncAddrCalledFromOrphanedCode(this->GetFirstFuncAddr())) {
		this->SetIsCalledFromOrphanedCode();
	}

	FragmentWorkList.clear();
	return;
} // end of SMPFunction::AnalyzeFunc()

#define STARS_DEBUG_LINKER_STUBS 1
// Determine whether func is a linker stub, e.g. a PLT stub.
//  Sets this->LinkerStub when one of three GCC-style patterns matches:
//   (1) IDA form: first block ends with an indirect jump, and (if a second
//       block exists) the last block ends with a tail call;
//   (2) IRDB form: a single block ending with a tail call;
//   (3) low-level 3-instruction form: indirect jump, push, unconditional jump.
void SMPFunction::DetectLinkerStubFunction(void) {
#if STARS_DEBUG_LINKER_STUBS
	// Hard-coded address of one stub under investigation in a particular binary;
	//  debugging aid only, harmless for other binaries.
	if (0x405c40 == this->GetFirstFuncAddr()) {
		SMP_msg("DEBUG: Linker stub: NumBlocks: %zd NumInstrs: %zd \n", this->GetNumBlocks(), this->Instrs.size());
		SMP_msg("DEBUG: Block 0 indir jump: %d Last block 1 tail call: %d \n", 
			this->RPOBlocks[0]->HasIndirectJump(),
			this->RPOBlocks[this->GetNumBlocks() - 1]->EndsWithTailCall());
	}
#endif
	// We will address only GCC PLT stub forms at this time.
	//  Size gate: at most 2 blocks and at most 6 instructions.
	if ((2 >= this->GetNumBlocks()) && (6 >= this->Instrs.size())) {
		// Each block of a PLT stub ends with an indirect jump, and there
		//  are not too many other instructions.
		//  In particular, it would be bizarre for any other function besides a linker stub to
		//  have two basic blocks, with the second block being reached only by an indirect jump from
		//  the first block (which happens through the PLT, but we are not checking that here), and then
		//  the second block ending with a tail call jump, with no return instruction in the func.
		bool IDA_PLT = (this->RPOBlocks[0]->HasIndirectJump()
			&& ((1 == this->GetNumBlocks()) || this->RPOBlocks[this->GetNumBlocks() - 1]->EndsWithTailCall()));
		bool IRDB_PLT = ((1 == this->GetNumBlocks()) && this->RPOBlocks[0]->EndsWithTailCall());
		if (IDA_PLT || IRDB_PLT) {
			this->LinkerStub = true;
			SMP_msg("INFO: Linker stub function found at %llx \n", (uint64_t) this->GetFirstFuncAddr());
		}
		else if (this->Instrs.size() < 6) { // Must have few instructions
			// Try the lower-level pattern match:
			//  indirect jump
			//  push value
			//  unconditional jump
			// e.g., from objdump:
			// <__gnat_free_process@plt>:  jmpq   *0x4661ba(%rip)        # 0x575e30
			// <__gnat_free_process@plt + 6>:    pushq  $0x2f2
			// <__gnat_free_process@plt + 11> : jmpq   0x10cd40
			// The three flags latch in sequence: each is examined only after the
			//  preceding one has been satisfied, enforcing instruction order.
			bool IndirJumpFirst = false;
			bool PushSecond = false;
			bool JumpThird = false;
			uint16_t InstCount = 0; // counts non-marker instructions only
			for (list<SMPInstr *>::iterator InstIter = this->GetFirstInstIter(); InstIter != this->GetLastInstIter(); ++InstIter) {
				SMPInstr *CurrInst = (*InstIter);
				if (!CurrInst->IsMarkerInst()) {
					if (!IndirJumpFirst) {
						IndirJumpFirst = (CurrInst->GetDataFlowType() == INDIR_JUMP) || CurrInst->IsTailCall();
					}
					else if (!PushSecond) {
						PushSecond = CurrInst->MDIsPushInstr();
					}
					else if (!JumpThird) {
						JumpThird = (CurrInst->GetDataFlowType() == JUMP) || CurrInst->IsTailCall();
					}
					++InstCount;
				}
			} // end for each inst
			// Require exactly the three pattern instructions and nothing else.
			if ((3 == InstCount) && IndirJumpFirst && PushSecond && JumpThird) {
				this->LinkerStub = true;
				SMP_msg("INFO: Linker stub function found at %llx by 3-inst pattern\n", (uint64_t) this->GetFirstFuncAddr());
			}
		}
	}
	return;
} // end of SMPFunction::DetectLinkerStubFunction()

// Detect function that just puts its return address into a reg and returns.
//  Sets this->ThunkFunc when a single-block function reads the stack slot at
//  normalized offset 0 (the return address, given successful stack pointer
//  analysis) via a move, and later executes a return opcode.
void SMPFunction::DetectThunkFunction(void) {
	// Only single-block functions with completed, successful stack pointer
	//  analysis qualify; success means stack offsets below are normalized.
	if (1 == this->GetNumBlocks() && this->HasSTARSStackPtrAnalysisCompleted() && this->StackPtrAnalysisSucceeded()) {
		bool FoundReturnAddressRead = false;
		SMPBasicBlock *CurrBlock = this->RPOBlocks[0];
		for (vector<SMPInstr *>::iterator InstIter = CurrBlock->GetFirstInst(); InstIter != CurrBlock->GetLastInst(); ++InstIter) {
			SMPInstr *CurrInst = (*InstIter);
			if (CurrInst->IsMarkerInst())
				continue;
			// First look for a move that loads from the stack.
			if (!FoundReturnAddressRead && CurrInst->HasSourceMemoryOperand() && CurrInst->MDIsMoveInstr()) {
				STARSOpndTypePtr MemSrcOp = CurrInst->GetMemUse();
				if (MDIsDirectStackAccessOpnd(MemSrcOp, false)) {
					// Must be normalized, because stack ptr analysis succeeded.
					int BaseReg, IndexReg;
					uint16_t Scale;
					STARS_ea_t Offset;
					MemSrcOp->MDExtractAddressFields(BaseReg, IndexReg, Scale, Offset);
					// Offset 0 in the normalized frame is the return address slot.
					FoundReturnAddressRead = (0 == Offset);
				}
			}
			// After the return address read, a return opcode confirms the thunk pattern.
			else if (FoundReturnAddressRead && CurrInst->HasReturnOpcode()) {
				this->ThunkFunc = true;
				break;
			}
		}
	}
	return;
} // end of SMPFunction::DetectThunkFunction()

// Perform analyses that might need some info from other functions in the call graph.
//  Stages: fix frame info; find allocs/deallocs; finalize per-instruction DEF/USE
//  lists; audit call targets (pruning targets that land inside this function);
//  compute stack pointer deltas and min/max/alloc-point deltas; run live variable
//  analysis when safe; compute incoming/outgoing register sets.
void SMPFunction::AdvancedAnalysis(void) {
	list<SMPInstr *>::iterator InstIter;
	SMPInstr *CurrInst;

	// IDA Pro has trouble with functions that do not have any local
	//  variables. Unfortunately, the C library has plenty of these
	//  functions. IDA usually claims that frregs is zero and frsize
	//  is N, when the values should have been reversed. We can attempt
	//  to detect this and fix it. IDA Pro also sometimes has trouble with
	//  functions that allocate the stack frame, and then push registers
	//  later before making a call, because it wants to include register
	//  pushes below the stack frame as being part of the stack frame,
	//  even when they are temporary saves and restores. __brk in the
	//  Gnu stdclib is an example as of November of 2012.
	// NOTE: We are no longer fixing IDA Pro frame info as of July, 2015.
	//  We just compute our own frame info, which has always proved to be at
	//  least as good as IDA Pro info.
	bool FrameInfoFixed = this->MDFixFrameInfo(); // return value currently unused
#if SMP_DEBUG_CONTROLFLOW
	SMP_msg("Returned from MDFixFrameInfo()\n");
#endif

	// Locate the frame allocation and deallocation instructions.
	this->FindAllAllocsAndDeallocs();

	InstIter = this->Instrs.begin();
	if ((*InstIter)->IsMarkerInst()) {
		++InstIter; // skip marker inst
	}
	for ( ; InstIter != this->Instrs.end(); ++InstIter) {
		CurrInst = (*InstIter);
		STARS_ea_t InstAddr = CurrInst->GetAddr(); // for debugging breakpoints
		// We can finally search for stack loads now that UseFP has been fixed by
		//  MDFixFrameInfo(). Otherwise, we would do this in SMPInstr::Analyze(),
		//  but the UseFP flag is not ready that early.
		CurrInst->MDFindLoadFromStack(this->UsesFramePointer());

		// Fix up machine dependent quirks in the def and use lists.
		//  This used to be called from within SMPInstr.Analyze(), but info such as UseFP
		//  is not available that early.
		CurrInst->MDFixupDefUseLists();

		// Keep the RTL representation consistent with the corrected frame info.
		if (CurrInst->HasGoodRTL())
			CurrInst->SyncAllRTs(this->UsesFramePointer(), this->GetFramePtrStackDelta());

		// Detect indirect memory references.
		CurrInst->AnalyzeIndirectRefs(this->UsesFramePointer());
	}

	// Audit the call instructions and call targets.
	//  A target inside this function is either direct recursion to the entry point
	//  or a pseudo-call (a call opcode used as a jump); pseudo-call targets are
	//  removed from the target sets.
	//  !!!!****!!!! NOTE: Not sure the address range checks in this code are valid
	//   for functions with scattered chunks.
	if ((!this->AllCallTargets.empty()) || this->HasUnresolvedIndirectCalls()) {
		bool FoundInternalCallTarget = false;
		vector<STARS_ea_t>::iterator CurrTarget = this->AllCallTargets.begin();
		set<STARS_ea_t>::iterator CurrDirectTarget, CurrIndirectTarget;
		while (CurrTarget != this->AllCallTargets.end()) {
			if (this->IsInstIDInFunc(*CurrTarget)) {
				// Found a call target that is within the function.
				FoundInternalCallTarget = true;
				if (this->GetFirstFuncAddr() == *CurrTarget) { // Direct recursion, not a pseudo-jump
					this->DirectlyRecursive = true;
				}
				CurrTarget = this->AllCallTargets.erase(CurrTarget);
			}
			else {
				++CurrTarget;
			}
		}
		if (FoundInternalCallTarget) {
			// We have to mark the pseudo-call instructions and audit the direct and
			//  indirect call target vectors.

			// Audit direct call targets.
			CurrDirectTarget = this->DirectCallTargets.begin();
			set<STARS_ea_t>::iterator CopyOfIterator;
			// NOTE(review): CallTargetRemoved is set below but never read afterward
			//  in this method; looks like leftover bookkeeping - confirm.
			bool CallTargetRemoved = false;
			while (CurrDirectTarget != this->DirectCallTargets.end()) {
				STARS_ea_t TargetAddr = (*CurrDirectTarget);
				if (this->IsInstIDInFunc(TargetAddr) && (this->FuncInfo->get_startEA() != TargetAddr)) {
					// Found a call target that is within the function, but not recursive call to func entry.
					CopyOfIterator = this->RemoveDirectCallTarget(TargetAddr);
					CurrDirectTarget = CopyOfIterator;
					CallTargetRemoved = true;
				}
				else {
					++CurrDirectTarget;
				}
			}
			// Audit indirect call targets.
			CurrIndirectTarget = this->IndirectCallTargets.begin();
			while (CurrIndirectTarget != this->IndirectCallTargets.end()) {
				STARS_ea_t TargetAddr = (*CurrIndirectTarget);
				if (this->IsInstIDInFunc(TargetAddr)) {
					// Found a call target that is within the function.
					CopyOfIterator = CurrIndirectTarget;
					++CopyOfIterator; // point to element after element that will be erased
					this->IndirectCallTargets.erase(CurrIndirectTarget);
					CurrIndirectTarget = CopyOfIterator;
					CallTargetRemoved = true;
				}
				else {
					++CurrIndirectTarget;
				}
			}
#if 1
			// Find calls used as jumps and mark them at the instruction level.
			list<SMPInstr *>::iterator InstIter = this->Instrs.begin();
			while (InstIter != this->Instrs.end()) {
				SMPInstr *CurrInst = (*InstIter);
				SMPitype InstFlow = CurrInst->GetDataFlowType();
				if ((CALL == InstFlow) || (INDIR_CALL == InstFlow)) {
					CurrInst->AnalyzeCallInst(this->GetFirstFuncAddr());
				}
				++InstIter;
			}
#endif
		} // end if (FoundInternalCallTarget)
	}

	// Figure out the stack frame and related info.
	(void) this->AnalyzeStackPointerDeltas();
	// Release temporary data used only during stack pointer analysis.
	this->StackPtrCopySet.clear(); // free memory
	this->TempStackDeltaReachesList.clear();
	this->TempReachingDefs.clear();

	if (this->StackPtrAnalysisSucceeded() && this->HasGoodRTLs() && (!this->HasUnresolvedIndirectJumps())) {
		this->AuditCallingConvention();
#define RECOMPUTE_VARKILL_SETS_AFTER_NORMALIZATION 1
#if RECOMPUTE_VARKILL_SETS_AFTER_NORMALIZATION
		// alloca() reprocessing can change normalized stack operands, so the
		//  kill/upward-exposed sets must be rebuilt before LVA.
		if (this->CallsAlloca) {
			list<SMPBasicBlock *>::iterator BlockIter;
			for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
				(*BlockIter)->InitKilledExposed(UseFP, false);
			}
		}
#endif
		this->LiveVariableAnalysis(false);
	}

#define RECOMPUTE_LVA_SETS_AFTER_NORMALIZATION 0
#if RECOMPUTE_LVA_SETS_AFTER_NORMALIZATION
	// Recompute LVA sets if we have stack ops in any block's LVA sets
	//  and we went through alloca() reprocessing in AnalyzeStackPointerDeltas() above.
	if (this->CallsAlloca) {
		this->RecomputeStackLVA();
	}
#endif

	// Calculate min and max stack point deltas, and AllocPointDelta.
	this->MinStackDelta = 20000; // Final value should be negative or zero
	this->MaxStackDelta = -1000; // Final value should be zero.
	InstIter = this->Instrs.begin();
	if ((*InstIter)->IsMarkerInst()) {
		++InstIter; // skip marker inst
	}
	for (; InstIter != this->Instrs.end(); ++InstIter) {
		CurrInst = (*InstIter);
		STARS_ea_t addr = CurrInst->GetAddr();
		STARS_sval_t sp_delta = CurrInst->GetStackPtrOffset();
		if (sp_delta < this->MinStackDelta)
			this->MinStackDelta = sp_delta;
		if (sp_delta > this->MaxStackDelta)
			this->MaxStackDelta = sp_delta;
		if (addr == this->LocalVarsAllocInstr) {
			// Total stack pointer delta is sp_delta for the next instruction,
			//  because IDA updates the sp delta AFTER each instruction.
			list<SMPInstr *>::iterator NextInstIter = InstIter;
			++NextInstIter;
			if (NextInstIter != this->Instrs.end()) {
				sp_delta = (*NextInstIter)->GetStackPtrOffset();
				this->AllocPointDelta = sp_delta;
			}
		}
	}

	// Summarize incoming/outgoing registers and memory-write behavior.
	bool WritesMem = false;
	bool CallChainNonReturning = false;
	bool Changed = this->ComputeInOutRegs(false, WritesMem, CallChainNonReturning);
	this->AltersMemory |= WritesMem;
	return;
} // end of SMPFunction::AdvancedAnalysis()

// Fix call inst DEFs and USEs, return true if changed.
//  Walks every instruction (skipping the SSA marker inst) and lets each one
//  repair its call-related DEF/USE lists; the aggregate "anything changed"
//  flag is returned so callers can decide whether downstream analyses are stale.
bool SMPFunction::AdvancedAnalysis2(void) {
	bool FuncChanged = false;
#if 0   // We want to process directly recursive calls to see what is LiveIn and not LiveIn
	if (!this->IsLeaf()) {
#endif
		list<SMPInstr *>::iterator InstIter;
		SMPInstr *CurrInst;

		// Fixup DEF and USE lists for call insts.
		InstIter = this->Instrs.begin();
		if ((*InstIter)->IsMarkerInst()) {
			++InstIter; // skip marker inst
		}
		for (; InstIter != this->Instrs.end(); ++InstIter) {
			CurrInst = (*InstIter);
			STARS_ea_t addr = CurrInst->GetAddr(); // for debugging breakpoints
			bool InstChanged = CurrInst->MDFixupCallDefUseLists();
			FuncChanged = FuncChanged || InstChanged;
		}
#if 0 // debug crashes first
		if (FuncChanged) {
			// LVA sets are stale because call instructions had DEFs and USEs updated.
			this->LiveVariableAnalysis(true);
		}
#endif
#if 0
	}
#endif

	return FuncChanged;
} // end of SMPFunction::AdvancedAnalysis2()

// Perform analyses that might need some info from other functions in the call graph.
//  Computes stack frame info for the function; under SMP_COUNT_MEMORY_ALLOCATIONS,
//  also accumulates global instruction/block/local-var counts.
// Improvements over the prior version: the if/else on HasSharedChunks() called
//  SetStackFrameInfo() identically in both branches (differing only in a debug
//  message), so the duplicate call is unified; the unused locals InstIter and
//  CurrInst are removed.
void SMPFunction::AdvancedAnalysis3(void) {
#if SMP_DEBUG_CONTROLFLOW
	SMP_msg("SMPFunction::AdvancedAnalysis3: set stack frame info.\n");
#endif

#ifdef SMP_DEBUG_FUNC
	// Debug-only notice; stack frame info is computed regardless of shared chunks.
	if (this->HasSharedChunks()) {
		SMP_msg("INFO: %s has shared chunks \n", this->GetFuncName());
	}
#endif
	// Figure out the stack frame and related info.
	this->SetStackFrameInfo();

#if SMP_COUNT_MEMORY_ALLOCATIONS
	SMPInstCount += ((unsigned long) this->Instrs.size());
	SMPBlockCount += ((unsigned long) this->Blocks.size());
	SMPLocalVarCount += ((unsigned long) this->LocalVarTable.size());
#endif
	return;
} // end of SMPFunction::AdvancedAnalysis3()

// Count call targets that have not been processed.
//  Targets with no known SMPFunction are skipped; bad call targets are
//  removed later in AdvancedAnalysis(), so they are not errors here.
std::size_t SMPFunction::UnprocessedCalleesCount(void) {
	std::size_t PendingCount = 0;

	for (STARS_ea_t TargetAddr : this->AllCallTargets) {
		SMPFunction *Callee = this->GetProg()->FindFunction(TargetAddr);
		if ((nullptr != Callee) && (!Callee->IsFuncProcessed())) {
			++PendingCount;
		}
	}
	return PendingCount;
} // end of SMPFunction::UnprocessedCalleesCount()

// Return the address of the first call target whose function has not yet been
//  processed, or STARS_BADADDR if every known callee is already processed.
STARS_ea_t SMPFunction::GetFirstUnprocessedCallee(void) {
	for (STARS_ea_t TargetAddr : this->AllCallTargets) {
		SMPFunction *Callee = this->GetProg()->FindFunction(TargetAddr);
		if ((nullptr != Callee) && (!Callee->IsFuncProcessed())) {
			return TargetAddr; // first pending callee in target order
		}
	}
	return STARS_BADADDR;
} // end of SMPFunction::GetFirstUnprocessedCallee()

// Is the code starting at TargetAddr a non-shared chunk that jumps back into our function?
//  If so, it can be incorporated into our function rather than treated as a separate function.
//  This method is called only when we see a jump outside our function, and it is looking for
//  code fragments that are not really functions (i.e. don't have a stack frame, jump straight back
//  into our function after executing a few instructions, not a chunk shared among other functions).
//  These code fragments are found in the locking and unlocking code of the gcc stdlib, for example.
bool SMPFunction::FindDistantCodeFragment(STARS_ea_t TargetAddr) {
	// The interface layer performs the actual fragment search.
	STARS_Function_t *FuncPtr = this->GetFuncInfo();
	return FuncPtr->FindDistantCodeFragment(this, TargetAddr);
} // end of SMPFunction::FindDistantCodeFragment()

// Free memory that is no longer needed after loop 2 of SMPProgram::Analyze().
//  For each vector that is fully computed by this point, either swap with a
//  right-sized copy (SMP_SHRINK_TO_FIT) or resize to the current element count,
//  while accumulating the global unused-element counters.
// NOTE(review): vector::resize(size()) in the non-SHRINK branches is a no-op
//  and does not release capacity; only the SMP_SHRINK_TO_FIT swap idiom
//  actually frees memory. Confirm whether the #else branches are intended
//  only for counting, not freeing.
void SMPFunction::FreeUnusedMemory2(void) {
	std::size_t UnusedElements;
	std::size_t CurrSize;

#if 0
	// Go through vector containers and resize to current capacity, if the vector
	//  has been fully computed by the time SMPProgram:Analyze() loop 2 completes.
	CurrSize = this->DirectCallTargets.size();
	UnusedElements = this->DirectCallTargets.capacity() - CurrSize;
	if (0 < UnusedElements) {
		UnusedIntCount += (unsigned long) UnusedElements;
#if SMP_SHRINK_TO_FIT
		std::vector<STARS_ea_t>(this->DirectCallTargets).swap(this->DirectCallTargets);
#else
		this->DirectCallTargets.resize(CurrSize);
#endif
	}

	CurrSize = this->IndirectCallTargets.size();
	UnusedElements = this->IndirectCallTargets.capacity() - CurrSize;
	if (0 < UnusedElements) {
		UnusedIntCount += (unsigned long) UnusedElements;
#if SMP_SHRINK_TO_FIT
		std::vector<STARS_ea_t>(this->IndirectCallTargets).swap(this->IndirectCallTargets);
#else
		this->IndirectCallTargets.resize(CurrSize);
#endif
	}
#endif

	// Trim the combined call target vector.
	CurrSize = this->AllCallTargets.size();
	UnusedElements = this->AllCallTargets.capacity() - CurrSize;
	if (0 < UnusedElements) {
		UnusedIntCount += (unsigned long) UnusedElements;
#if SMP_SHRINK_TO_FIT
		std::vector<STARS_ea_t>(this->AllCallTargets).swap(this->AllCallTargets);
#else
		this->AllCallTargets.resize(CurrSize);
#endif
	}

	// Trim the saved-register-location vector.
	CurrSize = this->SavedRegLoc.size();
	UnusedElements = this->SavedRegLoc.capacity() - CurrSize;
	if (0 < UnusedElements) {
		UnusedIntCount += (unsigned long) UnusedElements;
#if SMP_SHRINK_TO_FIT
		std::vector<int>(this->SavedRegLoc).swap(this->SavedRegLoc);
#else
		this->SavedRegLoc.resize(CurrSize);
#endif
	}

	// Trim the reverse-post-order block vector.
	CurrSize = this->RPOBlocks.size();
	UnusedElements = this->RPOBlocks.capacity() - CurrSize;
	if (0 < UnusedElements) {
		// NOTE(review): DummyIter is a list iterator passed as the fill value for a
		//  resize that never adds elements (CurrSize == size()); it appears to be dead
		//  in practice and type-suspect for a vector of block pointers - confirm.
		list<SMPBasicBlock *>::iterator DummyIter = this->Blocks.end();
		UnusedIntCount += (unsigned long) UnusedElements;
#if SMP_SHRINK_TO_FIT
		std::vector<SMPBasicBlock *>(this->RPOBlocks).swap(this->RPOBlocks);
#else
		this->RPOBlocks.resize(CurrSize, DummyIter);
#endif
	}

	// Trim the local variable table.
	CurrSize = this->LocalVarTable.size();
	UnusedElements = this->LocalVarTable.capacity() - CurrSize;
	if (0 < UnusedElements) {
		struct LocalVar DummyVar; // fill value; unused because resize never grows here
		DummyVar.offset = 0;
		DummyVar.size = 0;
		UnusedStructCount += (unsigned long) UnusedElements;
#if SMP_SHRINK_TO_FIT
		std::vector<struct LocalVar>(this->LocalVarTable).swap(this->LocalVarTable);
#else
		this->LocalVarTable.resize(CurrSize, DummyVar);
#endif
	}

	return;
} // end of SMPFunction::FreeUnusedMemory2()

// Free memory that is no longer needed after loop 3 of SMPProgram::Analyze().
//  Trims the return-register-types vector, counting the reclaimed elements.
// NOTE(review): as in FreeUnusedMemory2(), the non-SHRINK resize(CurrSize)
//  branch is a capacity no-op; only the SMP_SHRINK_TO_FIT swap frees memory.
void SMPFunction::FreeUnusedMemory3(void) {
	std::size_t UnusedElements;
	std::size_t CurrSize;

	// Go through vector containers and resize to current capacity, if the vector
	//  has been fully computed by the time SMPProgram:Analyze() loop 2 completes.
	CurrSize = this->ReturnRegTypes.size();
	UnusedElements = this->ReturnRegTypes.capacity() - CurrSize;
	if (0 < UnusedElements) {
		UnusedIntCount += (unsigned long) UnusedElements;
#if SMP_SHRINK_TO_FIT
		std::vector<SMPOperandType>(this->ReturnRegTypes).swap(this->ReturnRegTypes);
#else		
		this->ReturnRegTypes.resize(CurrSize);
#endif
	}

	return;
} // end of SMPFunction::FreeUnusedMemory3()

// Free memory that is no longer needed after type inference (loop 4 of SMPProgram::Analyze()).
void SMPFunction::FreeUnusedMemory4(void) {
	// this->KillSet.clear();  // needed for EmitFuncSPARKAda()
	// this->LiveOutSet.clear();
	this->BlocksDefinedIn.clear();

#if SMP_SHRINK_TO_FIT
#if 0
	STARSOpndSet(this->KillSet).swap(this->KillSet);
	STARSOpndSet(this->LiveOutSet).swap(this->LiveOutSet);
#endif
#endif

	list<SMPBasicBlock *>::iterator BlockIter;
	for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
		(*BlockIter)->FreeUnusedMemory4();
	}
	return;
} // end of SMPFunction::FreeUnusedMemory4()

// Free SSA data structures that are no longer needed when all SSA numbers have
//  been recorded in DEFs and USEs.
void SMPFunction::FreeSSAMemory(void) {
#if 0 // dominator info is useful for lots of CFG analyses
	this->IDom.clear();
	this->DomTree.clear();
#endif
	// this->BlocksDefinedIn.clear();
	this->SSACounter.clear();
	this->SSAStack.clear();

#if SMP_SHRINK_TO_FIT
#if 0
	vector<int>(this->IDom).swap(this->IDom);
	vector<pair<int, list<int> > >(this->DomTree).swap(this->DomTree);
#endif
	vector<list<int> >(this->BlocksDefinedIn).swap(this->BlocksDefinedIn);
	vector<int>(this->SSACounter).swap(this->SSACounter);
	vector<list<int> >(this->SSAStack).swap(this->SSAStack);
#endif

	list<SMPBasicBlock *>::iterator BlockIter;
	for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
		(*BlockIter)->FreeSSAMemory();
	}
	return;
} // end of SMPFunction::FreeSSAMemory()

// For each instruction, mark the non-flags-reg DEFs as having live
//  metadata (mmStrata needs to fetch and track this metadata for this
//  instruction) or dead metadata (won't be used as addressing reg, won't
//  be stored to memory, won't be returned to caller, won't be passed to a callee).
//  Implemented as a backward fixed-point iteration: liveness is propagated
//  from uses (address regs, returned regs, stored regs) back to their DEFs
//  until no status changes; remaining UNANALYZED DEFs are then marked UNUSED.
void SMPFunction::AnalyzeMetadataLiveness(void) {
	bool changed;            // fixed-point loop control: true if any status changed this pass
	int BaseReg;
	int IndexReg;
	uint16_t ScaleFactor;
	STARS_ea_t offset;
	list<SMPInstr *>::iterator InstIter;
	list<SMPInstr *>::reverse_iterator RevInstIter;
	set<DefOrUse, LessDefUse>::iterator CurrDef;
	set<DefOrUse, LessDefUse>::iterator CurrUse;
	set<DefOrUse, LessDefUse>::iterator NextUse;
	bool DebugFlag = false;
	bool UseFP = this->UsesFramePointer();
	int IterationCount = 0;

#if SMP_DEBUG_DATAFLOW
	if (0 == strcmp("uw_frame_state_for", this->GetFuncName())) {
		DebugFlag = true;
	}
#endif

	do {
		changed = false;
		++IterationCount;
		bool SafeMemDest;
		if (DebugFlag) {
			SMP_msg("AnalyzeMetadataLiveness iteration count: %d \n", IterationCount);
		}
		// Walk instructions in reverse so that liveness flows backward.
		for (RevInstIter = this->Instrs.rbegin(); RevInstIter != this->Instrs.rend(); ++RevInstIter) {
			SMPInstr *CurrInst = (*RevInstIter);
			STARS_ea_t InstAddr = CurrInst->GetAddr();
			SafeMemDest = false;  // true for some SafeFunc instructions
#if 0
			// Skip the SSA marker instruction.
			if (CurrInst->IsMarkerInst())
				continue;
#endif

			if (DebugFlag) {
				SMP_msg("Inst addr: %llx \n", (unsigned long long) CurrInst->GetAddr());
			}
			// Pass 1 over this instruction: classify each still-unanalyzed DEF.
			CurrDef = CurrInst->GetFirstDef();
			while (CurrDef != CurrInst->GetLastDef()) {
				if (DEF_METADATA_UNANALYZED == CurrDef->GetMetadataStatus()) {
					STARSOpndTypePtr DefOp = CurrDef->GetOp();
					// Handle special registers never used as address regs.
					if (DefOp->MatchesReg(X86_FLAGS_REG) || DefOp->MDIsSpecialRegOpType()) {
						CurrDef = CurrInst->SetDefMetadata(DefOp, DEF_METADATA_UNUSED);
						changed = true;
					}
					else if (MDIsStackOrFramePointerReg(DefOp, UseFP)) {
						// Stack pointer register DEFs always have live
						//  metadata, but we don't need to propagate back
						//  through particular DEF-USE chains.
						CurrDef = CurrInst->SetDefMetadata(DefOp, DEF_METADATA_USED);
						changed = true;
					}
					else if (DefOp->IsMemOp()) {
						// DEF is a memory operand. The addressing registers
						//  therefore have live metadata, and the memory metadata is live.
						// EXCEPTION: If the function is Safe, then direct stack writes
						//  to local variables (above the outgoing args area of the frame)
						//  are not live metadata, and there will be no indirect local frame
						//  writes, by definition of "safe." So, for safe funcs, only
						//  the o_mem (globals) and indirect writes are live metadata.
						if (this->IsSafe() && MDIsStackAccessOpnd(DefOp, this->UseFP)
							&& (!this->WritesAboveLocalFrame(DefOp, CurrInst->AreDefsNormalized(), InstAddr))
							&& (!this->IsInOutgoingArgsRegion(DefOp))) {
							++CurrDef;
							SafeMemDest = true;
							continue;
						}
						CurrDef = CurrInst->SetDefMetadata(DefOp, DEF_METADATA_USED);
						changed = true;
						// Propagate liveness back to the base/index registers of the address.
						MDExtractAddressFields(DefOp, BaseReg, IndexReg,
							ScaleFactor, offset);
						if (STARS_x86_R_none != BaseReg) {
							STARSOpndTypePtr BaseOp = CurrInst->MakeRegOpnd(MDCanonicalizeSubReg((STARS_regnum_t) BaseReg));
							if (MDIsStackOrFramePointerReg(BaseOp, UseFP)) {
								; // do nothing; DEF handled by case above
							}
							else {
								CurrUse = CurrInst->FindUse(BaseOp);
								if (CurrUse == CurrInst->GetLastUse()) {
									SMP_msg("FATAL ERROR: BaseReg %d not in USE list at %llx for %s\n",
										BaseOp->GetReg(), (unsigned long long) CurrInst->GetAddr(),
										CurrInst->GetDisasm());
									assert(CurrUse != CurrInst->GetLastUse()); // kablooey!
								}
								// Propagate along the SSA chain: global names cross blocks,
								//  local names stay within the block.
								if (this->IsGlobalName(BaseOp)) {
									changed |= this->PropagateGlobalMetadata(BaseOp,
										DEF_METADATA_USED, CurrUse->GetSSANum(), InstAddr);
								}
								else {
									changed |= CurrInst->GetBlock()->PropagateLocalMetadata(BaseOp,
										DEF_METADATA_USED, CurrUse->GetSSANum(), InstAddr);
								}
							}
						} // end if STARS_x86_R_none != BaseReg
						if (STARS_x86_R_none != IndexReg) {
							STARSOpndTypePtr IndexOp = CurrInst->MakeRegOpnd(MDCanonicalizeSubReg((STARS_regnum_t) IndexReg));
							if (MDIsStackOrFramePointerReg(IndexOp, UseFP)) {
								; // do nothing; DEF handled by case above
							}
							else {
								CurrUse = CurrInst->FindUse(IndexOp);
								if (CurrUse == CurrInst->GetLastUse()) {
									SMP_msg("FATAL ERROR: IndexReg %d not in USE list at %llx for %s\n",
										IndexOp->GetReg(), (unsigned long long) CurrInst->GetAddr(),
										CurrInst->GetDisasm());
								}
								assert(CurrUse != CurrInst->GetLastUse());
								if (0 != ScaleFactor) {
									; // mmStrata knows scaled reg is NUMERIC
									// ... its metadata is not fetched
								}
								else if (this->IsGlobalName(IndexOp)) {
									changed |= this->PropagateGlobalMetadata(IndexOp,
										DEF_METADATA_USED, CurrUse->GetSSANum(), InstAddr);
								}
								else {
									changed |= CurrInst->GetBlock()->PropagateLocalMetadata(IndexOp,
										DEF_METADATA_USED, CurrUse->GetSSANum(), InstAddr);
								}
							}
						} // end if STARS_x86_R_none != IndexReg
					} // end if X86_FLAGS_REG .. else if stack ptr ... 
				} // end if unanalyzed metadata usage
				++CurrDef;
			} // end while processing DEFs
			// Pass 2: propagate liveness from USEs of control-transfer and
			//  memory-writing instructions back to their defining instructions.
			if ((RETURN == CurrInst->GetDataFlowType())
				|| (CurrInst->IsTailCall())   // quasi-return
				|| (CurrInst->IsBranchToOtherFunc())
				|| (CALL == CurrInst->GetDataFlowType())
				|| (INDIR_CALL == CurrInst->GetDataFlowType())) {
				// The EAX and EDX registers can be returned to the caller,
				//  which might use their metadata. They show up as USEs
				//  of the return instruction. Some library functions
				//  pass return values in non-standard ways. e.g. through
				//  EBX or EDI, so we treat all return regs the same.
				// For CALL instructions, values can be passed in caller-saved
				//  registers, unfortunately, so the metadata is live-in.
				CurrUse = CurrInst->GetFirstUse();
				while (CurrUse != CurrInst->GetLastUse()) {
					// Save the successor first: propagation can invalidate CurrUse.
					NextUse = CurrUse;
					++NextUse;
					STARSOpndTypePtr ReturnOp = CurrUse->GetOp();
					if (DebugFlag) {
						SMP_msg("ReturnOp: ");
						PrintOperand(ReturnOp);
						SMP_msg("\n");
					}
					if (ReturnOp->IsRegOp() &&
						(!MDIsStackOrFramePointerReg(ReturnOp, UseFP)) &&
						(!ReturnOp->MatchesReg(X86_FLAGS_REG))) {
						if (this->IsGlobalName(ReturnOp)) {
							changed |= this->PropagateGlobalMetadata(ReturnOp,
									DEF_METADATA_USED, CurrUse->GetSSANum(), InstAddr);
						}
						else {
							changed |= CurrInst->GetBlock()->PropagateLocalMetadata(ReturnOp,
									DEF_METADATA_USED, CurrUse->GetSSANum(), InstAddr);
						}
					}
					CurrUse = NextUse;
				} // end while all USEs
			} // end if return or call
			else if (CurrInst->HasDestMemoryOperand() 
				|| CurrInst->MDIsPushInstr()) {
				// Memory writes cause a lot of metadata usage.
				//  Addressing registers in the memory destination
				//  have live metadata used in bounds checking. The
				//  register being stored to memory could end up being
				//  used in some other bounds checking, unless we 
				//  have precise memory tracking and know that it
				//  won't.
				// We handled the addressing registers above, so we
				//  handle the register written to memory here.
				// The same exception applies as above: If the destination
				//  memory operand is not a stack write, then safe functions
				//  do not need to track the metadata.
				// If we push a register and have callees, the metadata could
				//  be live, if the callee gets its incoming args from our push
				//  instructions.
				if (SafeMemDest && !(CurrInst->MDIsPushInstr() && !this->IsLeaf())) {
					continue;  // go to next instruction
				}
				CurrUse = CurrInst->GetFirstUse();
				while (CurrUse != CurrInst->GetLastUse()) {
					NextUse = CurrUse;
					++NextUse;
					STARSOpndTypePtr UseOp = CurrUse->GetOp();
					// NOTE: **!!** To be less conservative, we
					//  should propagate more for exchange category
					//  instructions.
					if ((UseOp->IsRegOp()) && (!MDIsStackOrFramePointerReg(UseOp, UseFP)) && (!UseOp->MatchesReg(X86_FLAGS_REG))) {
						if (this->IsGlobalName(UseOp)) {
							changed |= this->PropagateGlobalMetadata(UseOp,
									DEF_METADATA_USED, CurrUse->GetSSANum(), InstAddr);
						}
						else {
							changed |= CurrInst->GetBlock()->PropagateLocalMetadata(UseOp,
									DEF_METADATA_USED, CurrUse->GetSSANum(), InstAddr);
						}
					} // end if register
					CurrUse = NextUse;
				} // end while all USEs
			} // end if call or return else if memdest ...
		} // end for all instructions
	} while (changed);

	// All DEFs that still have status DEF_METADATA_UNANALYZED can now
	//  be marked as DEF_METADATA_UNUSED.
	for (InstIter = this->Instrs.begin(); InstIter != this->Instrs.end(); ++InstIter) {
		SMPInstr *CurrInst = (*InstIter);
		STARS_ea_t InstAddr = CurrInst->GetAddr(); // for debugging breakpoints
#if 0   // important to know if incoming values are dead metadata
		if (CurrInst->IsMarkerInst())
			continue;
#endif
		CurrDef = CurrInst->GetFirstDef();
		while (CurrDef != CurrInst->GetLastDef()) {
			SMPMetadataType CurrStatus = CurrDef->GetMetadataStatus();
			if (DEF_METADATA_UNANALYZED == CurrStatus) {
				CurrDef = CurrInst->SetDefMetadata(CurrDef->GetOp(),
					DEF_METADATA_UNUSED);
				assert(CurrDef != CurrInst->GetLastDef());
				++DeadMetadataCount;
			}
			else if (DEF_METADATA_USED <= CurrStatus) {
				// Any USED-or-stronger status counts as live metadata.
				++LiveMetadataCount;
			}
			else {
				;  // must be UNUSED, but only because it is the flags register. Don't count.
			}
			++CurrDef;
		}
	}

	return;
} // end of SMPFunction::AnalyzeMetadataLiveness() 

// Propagate the metadata Status for UseOp/SSANum to its global DEF.
// Return true if successful.
// UseOp/SSANum : SSA name whose defining site should receive Status.
// UseAddr      : location of the USE; either an instruction address or a
//                block-number pseudo-ID (see STARS_IsBlockNumPseudoID),
//                used to locate the block in which to search for the DEF.
// Recurses through register USEs (and, for safe functions, stack USEs) so
//  that liveness flows backward along SSA def-use chains and Phi functions.
bool SMPFunction::PropagateGlobalMetadata(const STARSOpndTypePtr &UseOp, SMPMetadataType Status, int SSANum, STARS_ea_t UseAddr) {
	bool changed = false;
	bool UseFP = this->UsesFramePointer();

	// Nothing to do for unnumbered SSA names or operands outside the dataflow analysis.
	if ((0 > SSANum) || (!MDIsDataFlowOpnd(UseOp, UseFP)))
		return false;

	// Find the DEF of UseOp with SSANum.
	STARS_ea_t DefAddr;
	SMPBasicBlock *UseBlock;
	SMPBasicBlock *DefBlock;

	if (!STARS_IsBlockNumPseudoID(UseAddr)) { // UseAddr is an inst addr
		UseBlock = this->GetBlockFromInstAddr(UseAddr);
	}
	else { // UseAddr is a block number
		UseBlock = this->GetBlockByNum(STARS_GetBlockNumFromPseudoID(UseAddr));
	}

	// DefAddr comes back as an instruction address, a block-number pseudo-ID
	//  (DEF is a Phi function in that block), or a failure code handled below.
	STARSOpndTypePtr DefMoveOp = nullptr;
	DefAddr = UseBlock->GetUltimateDefAddr(UseOp, UseAddr, SSANum, false, false, false, DefMoveOp);

	if (STARS_BADADDR == DefAddr) {
		return changed;
	}

	if (STARS_LIVEIN_PSEUDO_ID == DefAddr) {
		// Seems to only happen on STARS/IRDB. Need to debug later.  !!!!****!!!!
		return changed;
	}

	if (!STARS_IsBlockNumPseudoID(DefAddr)) { // found a DEF inst.
		SMPInstr *CurrInst = this->GetInstFromAddr(DefAddr);
		STARS_ea_t InstAddr = DefAddr;
		DefBlock = this->GetBlockFromInstAddr(DefAddr);
		set<DefOrUse, LessDefUse>::iterator CurrDef = CurrInst->FindDef(UseOp);
		set<DefOrUse, LessDefUse>::iterator CurrUse;
		assert(CurrDef != CurrInst->GetLastDef());
		assert(SSANum == CurrDef->GetSSANum());
		// Only propagate if the status actually changes; this bounds the recursion.
		if (Status != CurrDef->GetMetadataStatus()) {
			CurrDef = CurrInst->SetDefMetadata(UseOp, Status);
			changed = (CurrDef != CurrInst->GetLastDef());
			bool PropThroughUses = changed;

			// If source operand was memory, we have two cases.
			//  (1) The instruction could be a load, in which
			//  case we should simply terminate the
			//  propagation, because the prior DEF of a memory
			//  location is always considered live metadata
			//  already, and we do not want to propagate liveness
			//  to the address regs in the USE list.
			//  EXCEPTION: For safe funcs, we propagate liveness
			//   for stack locations.
			//  (2) We could have an arithmetic operation such
			//  as reg := reg arithop memsrc. In this case, we
			//  still do not want to propagate through the memsrc,
			//  (with the same safe func EXCEPTION),
			//  but the register is both DEF and USE and we need
			//  to propagate through the register.
			if (CurrInst->HasSourceMemoryOperand()) {
				if (this->IsSafe()) {
					STARSOpndTypePtr MemSrcOp = CurrInst->MDGetMemUseOp();
					assert(! MemSrcOp->IsVoidOp());
					// NOTE(review): this call uses the member this->UseFP while the
					//  rest of this function uses the local UseFP from
					//  UsesFramePointer() — confirm the two are kept consistent.
					if (MDIsDirectStackAccessOpnd(MemSrcOp, this->UseFP)) {
						// We have a SafeFunc stack access. This is
						//  the EXCEPTION case where we want to
						//  propagate metadata liveness for a memory
						//  location.
						CurrUse = CurrInst->FindUse(MemSrcOp);
						assert(CurrUse != CurrInst->GetLastUse());
						if (this->IsGlobalName(MemSrcOp)) {
							changed |= this->PropagateGlobalMetadata(MemSrcOp,
								Status, CurrUse->GetSSANum(), InstAddr);
						}
						else {
							changed |= CurrInst->GetBlock()->PropagateLocalMetadata(MemSrcOp,
								Status, CurrUse->GetSSANum(), InstAddr);
						}
					} // end if stack access operand
				} // end if SafeFunc
				if (3 == CurrInst->GetOptType()) { // move inst
					PropThroughUses = false; // load address regs are not live metadata
				}
				else if ((5 == CurrInst->GetOptType())
					|| (STARS_NN_and == CurrInst->GetIDAOpcode())
					|| (STARS_NN_or == CurrInst->GetIDAOpcode())
					|| (STARS_NN_xor == CurrInst->GetIDAOpcode())) {
					// add, subtract, and, or with memsrc
					// Find the DEF reg in the USE list.
					CurrUse = CurrInst->FindUse(UseOp);
					assert(CurrUse != CurrInst->GetLastUse());
					changed |= this->PropagateGlobalMetadata(UseOp,
						Status, CurrUse->GetSSANum(), InstAddr);
					PropThroughUses = false;
				}
			} // end if memory source

			// Now, propagate the metadata status to all the
			//  non-memory, non-flags-reg, non-special-reg 
			//  (i.e. regular registers) USEs.
			if (PropThroughUses) {
				CurrUse = CurrInst->GetFirstUse();
				while (CurrUse != CurrInst->GetLastUse()) {
					STARSOpndTypePtr CurrUseOp = CurrUse->GetOp();
					// NOTE: **!!** To be less conservative, we
					//  should propagate less for exchange category
					//  instructions.
					if ((CurrUseOp->IsRegOp()) && (!MDIsStackOrFramePointerReg(CurrUseOp, UseFP))
						&& (!CurrUseOp->MatchesReg(X86_FLAGS_REG))) {

						if (this->IsGlobalName(CurrUseOp)) {
							changed |= this->PropagateGlobalMetadata(CurrUseOp,
								Status, CurrUse->GetSSANum(), InstAddr);
						}
						else {
							changed |= CurrInst->GetBlock()->PropagateLocalMetadata(CurrUseOp,
								Status, CurrUse->GetSSANum(), InstAddr);
						}
					}
					++CurrUse;
				} // end while all USEs
			}
		}
	}

	else { // Found a DEF block number in DefAddr.
		// Check the Phi functions
		DefBlock = this->GetBlockByNum(STARS_GetBlockNumFromPseudoID(DefAddr));
		set<SMPPhiFunction, LessPhi>::iterator DefPhi;
		DefPhi = DefBlock->FindPhi(UseOp);
		assert(DefPhi != DefBlock->GetLastPhi());
		assert(SSANum == DefPhi->GetDefSSANum());
		if (Status != DefPhi->GetDefMetadata()) {
			DefPhi = DefBlock->SetPhiDefMetadata(UseOp, Status);
			changed = true;
			// If the Phi DEF has live metadata, then the Phi
			//  USEs each have live metadata. Propagate.
			int UseSSANum;
			for (std::size_t index = 0; index < DefPhi->GetPhiListSize(); ++index) {
				UseSSANum = DefPhi->GetUseSSANum(index);
				// UseSSANum can be -1 in some cases because
				//  we conservatively make EAX and EDX be USEs
				//  of all return instructions, when the function
				//  might have a void return type, making it
				//  appear as if an uninitialized EAX or EDX
				//  could make it to the return block.
				if (0 <= UseSSANum) {
					changed |= this->PropagateGlobalMetadata(UseOp,	Status, UseSSANum, DefAddr);
				}
			}
		}

	} // end if (DefAddr is inst addr) else ... [DefAddr is block number]

	return changed;
} // end of SMPFunction::PropagateGlobalMetadata()

// Find consecutive DEFs of the same type and mark the second one redundant.
// Delegates the detection to each basic block, passing along whether this
//  function is safe (which affects which stack DEFs the block may consider).
// FIX: the previous version accumulated the per-block return values into a
//  local 'changed' flag that was never read; the dead local has been removed.
void SMPFunction::FindRedundantMetadata(void) {
	list<SMPBasicBlock *>::iterator BlockIter;
	SMPBasicBlock *CurrBlock;

	for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
		CurrBlock = (*BlockIter);
		// Return value intentionally discarded; no caller needs the change flag.
		(void) CurrBlock->FindRedundantLocalMetadata(this->IsSafe());
	}
	return;
} // end of SMPFunction::FindRedundantMetadata()

// Perform SCCP to find constant values for DEFs, store in this->ConstantDefs
// Also marks never-visited blocks as unreachable and (optionally) records
//  always-taken / never-taken branch statistics.
void SMPFunction::SparseConditionalConstantPropagation(void) {
	// We perform the SCCP (Sparse Conditional Constant Propagation) algorithm
	//  as found in Cooper & Torczon, "Engineering a Compiler."
#if STARS_DEBUG_FUNC_SCCP
	bool DebugFlag = (0x8049d20 == this->GetFirstFuncAddr());
#endif

	// CFGWorkList := { all edges from pseudo-entry block to entry blocks }
	//  We do not have a pseudo-entry block, so we special case by starting processing at
	//  the first basic block, block number 0, which is our entry block.
	list<pair<int, int> > CFGWorkList; // edges from pair.first = source block number to pair.second = dest block number
	pair<int, int> InitEdge(-1, 0); // -1 is pseudo-block-number
	CFGWorkList.push_back(InitEdge);
	std::size_t BlockNumLimit = this->Blocks.size();
	vector<STARSBitSet> ExecutedEdgeBitSet(BlockNumLimit); // records which edges have been executed in SCCP; row = DestBlockNum, col (bit) = SrcBlockNum
	vector<STARSBranchConst> CurrentBranchStatus; // latest branch evaluation per block, indexed by block number
	CurrentBranchStatus.resize(BlockNumLimit);

	// for each edge e in the CFG
	//  mark e as unexecuted
	for (std::size_t EdgeIndex = 0; EdgeIndex < BlockNumLimit; ++EdgeIndex) {
		ExecutedEdgeBitSet[EdgeIndex].AllocateBits(BlockNumLimit); // allocate and zero all bits
		CurrentBranchStatus[EdgeIndex] = STARS_BRANCH_UNKNOWN;
	}
	this->ResetSCCPVisitedBlocks();  // records which blocks have been visited in SCCP algorithm

	// SSAWorkList := { empty set }
	list<pair<int, int> > SSAWorkList; // pair.first = block number, pair.second = name+SSA hash
	SSAWorkList.clear();

	// for each ref def x in the procedure
	//   Value(x) = TOP
	//  We currently implement this by having this->ConstantDefs contain no entry for defs with value TOP

	// while ((CFGWorkList is not empty) or (SSAWorkList is not empty))
	while (!(CFGWorkList.empty() && SSAWorkList.empty())) {

	//   if (CFGWorkList is not empty) then
	//       remove an edge e = (m, n) from the CFGWorkList
	//       if (e is marked as unexecuted) then
	//          mark e as executed
	//          EvaluateAllPhisInBlock(n)
	//          if (e is only edge into n marked as executed) then [i.e. first visit]
	//              for (each instruction i in block n) do
	//                  if (i is an assignment) then
	//                      EvaluateAssign(i)
	//                  else if (i is a conditional branch) then
	//                      EvaluateConditional(i)
	//                  endif
	//              endfor
	//              Put block successors on CFGWorkList, based on conditional branch evaluation if any
	//          endif
	//       endif
	//   endif

	//   if (CFGWorkList is not empty) then
		if (!(CFGWorkList.empty())) {
	//       remove an edge e = (m, n) from the CFGWorkList
			pair<int, int> CurrentEdge = CFGWorkList.front();
			CFGWorkList.pop_front();
	//       if (e is marked as unexecuted) then
			int SrcBlockNum = CurrentEdge.first;
			int DestBlockNum = CurrentEdge.second;
			// The pseudo-entry edge (SrcBlockNum == -1) is always treated as unexecuted.
			bool UnexecutedEdge = (0 > CurrentEdge.first);
#if STARS_DEBUG_FUNC_SCCP_VERBOSE
			if (0x807a530 == this->FirstEA) {
				SMP_msg("DEBUG: SCCP: Processing CFG edge from %d to %d\n", SrcBlockNum, DestBlockNum);
			}
#endif
			if (!UnexecutedEdge) {
				UnexecutedEdge = (!ExecutedEdgeBitSet.at((std::size_t) DestBlockNum).GetBit((std::size_t) SrcBlockNum));
			}
#if STARS_DEBUG_FUNC_SCCP
			if (DebugFlag) {
				SMP_msg("SCCP: Processing edge from block %d to block %d: UnexecutedEdge = %d\n", SrcBlockNum, DestBlockNum, UnexecutedEdge);
			}
#endif
			if (UnexecutedEdge) {
	//          mark e as executed
				if (0 <= SrcBlockNum) {
					ExecutedEdgeBitSet.at((std::size_t) DestBlockNum).SetBit((std::size_t) SrcBlockNum);
				}
	//          EvaluateAllPhisInBlock(n)
				this->EvaluateAllPhiConstants(DestBlockNum, ExecutedEdgeBitSet, SSAWorkList);
	//          if (e is only edge into n marked as executed) then
				SMPBasicBlock *CurrBlock = this->GetBlockByNum(DestBlockNum);
				if (!(CurrBlock->IsSCCPVisited())) {
	//              for (each instruction i in block n) do
	//                  if (i is an assignment) then
	//                      EvaluateAssign(i)
	//                  else if (i is a conditional branch) then
	//                      EvaluateConditional(i)
	//                  endif
	//              endfor
					if (CurrBlock->IsUnreachableBlock()) { // must have a call 0 instruction
						SMP_msg("ERROR: Unreachable block is being visited by SCCP at %llx\n", (unsigned long long) CurrBlock->GetFirstAddr());
					}
					enum STARSBranchConst BranchEval = STARS_BRANCH_UNKNOWN;
					CurrBlock->SCCPEvaluateConstants(BranchEval, CFGWorkList, SSAWorkList); // also marks block as SCCP visited
					CurrentBranchStatus[DestBlockNum] = BranchEval;
	//          endif
				}
	//       endif
			}
	//   endif
		}


	//
	//   if (SSAWorkList is not empty) then
	//       remove an edge e = (s, d) from SSAWorkList
	//       c := CFG node that uses d
	//       if (any edge entering c is marked as executable) then
	//           if (d is a phi function argument) then
	//               EvaluatePhi(d)
	//           else
	//              for (each instruction i in block c) do
	//                  if (i is an assignment that uses d) then
	//                      EvaluateAssign(i)
	//                  else if (i is a conditional branch that uses d) then
	//                      EvaluateConditional(i)
	//                  endif
	//              endfor
	//           endif
	//       endif
	//   endif

	//   if (SSAWorkList is not empty) then
		if (!(SSAWorkList.empty())) {
	//       remove an edge e = (s, d) from SSAWorkList
			pair<int, int> SSAEdge = SSAWorkList.front();
			SSAWorkList.pop_front();
			int BlockNum = SSAEdge.first;
			int DefHashValue = SSAEdge.second;
	//       c := CFG node that uses d
			assert(0 <= BlockNum);
			SMPBasicBlock *CurrBlock = this->GetBlockByNum((std::size_t) BlockNum);
#if STARS_DEBUG_FUNC_SCCP_VERBOSE
			if (0x807a530 == this->FirstEA) {
				SMP_msg("DEBUG: SCCP: Processing SSA item block %d DefHash %x\n", BlockNum, DefHashValue);
			}
#endif
			// Hash encoding: low 16 bits = register number, high 16 bits = SSA number.
			STARSOpndTypePtr DefOp = (*(this->GetFirstInstIter()))->MakeRegOpnd((STARS_regnum_t)(DefHashValue & 0x0000ffff));
			unsigned int TempSSANum = (unsigned int) (DefHashValue & 0xffff0000);
			int DefSSANum = (int)(TempSSANum >> 16);
			bool LocalName = CurrBlock->IsLocalName(DefOp);
	//       if (any edge entering c is marked as executable) then
	//           if (d is a phi function argument) then
	//               EvaluatePhi(d)
	//           else
	//              for (each instruction i in block c) do
	//                  if (i is an assignment that uses d) then
	//                      EvaluateAssign(i)
	//                  else if (i is a conditional branch that uses d) then
	//                      EvaluateConditional(i)
	//                  endif
	//              endfor
	//           endif
	//       endif
			// NOTE: We differ slightly from the published implementation. We don't maintain an SSA def-use chain explicitly,
			//  so we add the current block to the SSAWorkList when we find a new constant DEF, and search that block for
			//  further USEs of that operand with that SSA number.
			assert(CurrBlock->IsSCCPVisited()); // a local name is only added to SSAWorkList from within the block
			vector<SMPInstr *>::iterator InstIter = CurrBlock->GetFirstInst();
			if (!LocalName) {
				// Speed up search by starting at the DEF inst.
				InstIter = this->GetBlockInstIterBySSA(DefOp, DefSSANum);
			}
			else {
				STARS_ea_t DefAddr = CurrBlock->GetLocalDefAddrForRegHash(DefHashValue);
				InstIter = CurrBlock->GetInstIterFromAddr(DefAddr);
			}
			bool FoundReDEF = false;
			// Re-evaluate every instruction in the block that USEs DefOp/DefSSANum,
			//  stopping early if the operand is re-DEFed with a later SSA number.
			while (InstIter != CurrBlock->GetLastInst()) {
				SMPInstr *CurrInst = (*InstIter);
				assert(NULL != CurrInst);
				set<DefOrUse, LessDefUse>::iterator UseIter = CurrInst->FindUse(DefOp);
				if (UseIter != CurrInst->GetLastUse()) { // operand is USEd; check SSANum
					int UseSSANum = UseIter->GetSSANum();
					if (UseSSANum == DefSSANum) {
						SMPitype DataFlowType = CurrInst->GetDataFlowType();
						if (DEFAULT == DataFlowType) {
							CurrInst->SCCPEvaluateAssignment(SSAWorkList);
						}
						else if (COND_BRANCH == DataFlowType) {
							enum STARSBranchConst BranchEval;
							CurrInst->SCCPEvaluateCondBranch(BranchEval);
							CurrentBranchStatus[BlockNum] = BranchEval;
							CurrBlock->SCCPHandleSuccessors(BranchEval, CFGWorkList);
						}
					}
				}
				set<DefOrUse, LessDefUse>::iterator DefIter = CurrInst->FindDef(DefOp);
				if (DefIter != CurrInst->GetLastDef()) { // check for re-DEF
					int NewDefSSANum = DefIter->GetSSANum();
					if (NewDefSSANum > DefSSANum) { // re-DEF; we can stop searching for USEs of DefOp/DefSSANum in this block
						FoundReDEF = true;
						break;
					}
				}
				++InstIter;
			} // end while not last inst in block
			// See if we need to search for other blocks that use DefOp/DefSSANum
			if (!(LocalName || FoundReDEF)) { // we have global name that is not redefined
				list<SMPBasicBlock *>::iterator SuccIter;
				for (SuccIter = CurrBlock->GetFirstSucc(); SuccIter != CurrBlock->GetLastSucc(); ++SuccIter) {
					SMPBasicBlock *SuccBlock = (*SuccIter);
					if (SuccBlock->IsSCCPVisited() && SuccBlock->IsLiveIn(DefOp)) {
						this->ResetProcessedBlocks();
						SuccBlock->SCCPGlobalPropagationHelper(DefOp, DefSSANum, ExecutedEdgeBitSet, CFGWorkList, SSAWorkList);
					}
				}
			}
	//   endif
		}
#if 0
		SSAWorkList.clear(); // temporary stub
#endif
	// endwhile
	}

	// Go back over the basic blocks and detect never-visited blocks. Label these as unreachable.
	list<SMPBasicBlock *>::iterator BlockIter;
	bool FoundUnreachableBlock = false;
	for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
		SMPBasicBlock *CurrBlock = (*BlockIter);
		if (!(CurrBlock->IsSCCPVisited())) {
			vector<SMPInstr *>::iterator FirstInstIter = CurrBlock->GetFirstInst();
			SMPInstr *FirstInst = (*FirstInstIter);
			// Blocks entered only via code cross-refs from data are not truly unreachable.
			if (!this->GetProg()->IsCodeXrefFromData(FirstInst->GetAddr())) {
				CurrBlock->SetUnreachableBlock(true);
				FoundUnreachableBlock = true;
				SMP_fprintf(global_STARS_program->GetInfoAnnotFile(), "%18llx %6zu INSTR UNREACHABLE %s \n",
					(uint64_t) FirstInst->GetAddr(), FirstInst->GetSize(), FirstInst->GetDisasm());
#if STARS_DEBUG_FUNC_SCCP
				STARS_ea_t BlockAddr = CurrBlock->GetFirstAddr();
				SMP_msg("INFO: SCCP found unreachable block at %llx\n", (unsigned long long) BlockAddr);
#endif
				this->GetProg()->AddBlockToRemovalList(CurrBlock);
#if STARS_SCCP_CONVERT_UNREACHABLE_BLOCKS
				CurrBlock->SCCPNullifyUnreachableBlock();
#endif
			}
		}
#if STARS_SCCP_GATHER_STATISTICS
		else { 
			// Visited block: emit annotations for branches SCCP resolved to a constant direction.
			vector<SMPInstr *>::iterator LastInstIter = CurrBlock->GetLastInst();
			--LastInstIter;
			SMPInstr *LastInst = (*LastInstIter);
			STARS_ea_t BranchAddr = LastInst->GetAddr();
			STARSBranchConst BranchEval = CurrentBranchStatus[CurrBlock->GetNumber()];
			if (STARS_BRANCH_ALWAYS_TAKEN == BranchEval) {
				SMP_fprintf(global_STARS_program->GetInfoAnnotFile(), "%18llx %6zu INSTR BRANCHALWAYSTAKEN %s \n",
					(uint64_t) BranchAddr, LastInst->GetSize(), LastInst->GetDisasm());
				this->AddResolvedBranch(BranchAddr);
			}
			else if (STARS_BRANCH_NEVER_TAKEN == BranchEval) {
				SMP_fprintf(global_STARS_program->GetInfoAnnotFile(), "%18llx %6zu INSTR BRANCHNEVERTAKEN %s \n",
					(uint64_t) BranchAddr, LastInst->GetSize(), LastInst->GetDisasm());
				this->AddResolvedBranch(BranchAddr);
			}
			if (CurrBlock->HasCallInstruction()) {
				CurrBlock->SCCPGatherStatistics();
			}
		}
#endif
	}

	return;
} // end of SMPFunction::SparseConditionalConstantPropagation() 

// emit debug output if SCCP found constant call target for an indir call or jump
// Records each such instruction address in this->ConstantIndirCalls.
void SMPFunction::AuditSCCPForIndirectTargets(void) {
	list<SMPInstr *>::iterator InstIter = this->GetFirstInstIter();
	++InstIter; // skip SSA Marker inst
	for (; InstIter != this->Instrs.end(); ++InstIter) {
		SMPInstr *CurrInst = (*InstIter);
		SMPitype CurrDataFlow = CurrInst->GetDataFlowType();
		if (((CurrDataFlow == INDIR_CALL) || (CurrDataFlow == INDIR_JUMP)) && (!CurrInst->IsInterruptCall())) {
			const STARSOpndTypePtr &TargetOp = CurrInst->GetControlFlowTarget();
			if (TargetOp->IsRegOp()) { // can only find SCCP constant values for regs
				// Canonicalize sub-word registers so the USE-set lookup matches.
				//  (Hoisted inside the IsRegOp() check; the clone is wasted work
				//  for non-register targets.)
				STARSOpndTypePtr SearchOp = CloneIfSubwordReg(TargetOp);
				CanonicalizeOpnd(SearchOp);
				set<DefOrUse, LessDefUse>::iterator TargetIter = CurrInst->FindUse(SearchOp);
				// BUG FIX: previously TargetIter was dereferenced without checking
				//  for the not-found case, which is undefined behavior when the
				//  canonicalized target is not in the USE set.
				if (TargetIter == CurrInst->GetLastUse()) {
					continue; // no USE entry for target; nothing to audit
				}
				int UseSSANum = TargetIter->GetSSANum();
				int UseHashValue = HashGlobalNameAndSSA(TargetOp, UseSSANum);
				STARS_SCCP_Const_Struct ConstStruct;
				ConstStruct.ConstType = STARS_CONST_TOP; // default to no const found
				if (CurrInst->GetBlock()->IsLocalName(TargetOp)) { // local name
					map<int, struct STARS_SCCP_Const_Struct>::iterator ConstValIter = CurrInst->GetBlock()->FindLocalConstValue(UseHashValue);
					if (ConstValIter != CurrInst->GetBlock()->GetLastLocalConstValueIter()) { // Has current const val entry
						ConstStruct = ConstValIter->second;
					}
				}
				else { // global name
					STARSSCCPMapIter ConstValIter = CurrInst->GetBlock()->GetFunc()->FindConstValue(UseHashValue);
					if (ConstValIter != CurrInst->GetBlock()->GetFunc()->GetLastConstValueIter()) { // Has current const val entry
						ConstStruct = ConstValIter->second;
					}
				}
				if (ConstStruct.ConstType == STARS_CONST_HAS_VALUE) {
					SMP_msg("INFO: SCCP found constant value of %llx for indirect target at %llx\n",
						(unsigned long long) ConstStruct.ConstValue, (unsigned long long) CurrInst->GetAddr());
					pair<set<STARS_ea_t>::iterator, bool> InsertResult = this->ConstantIndirCalls.insert(CurrInst->GetAddr());
					assert(InsertResult.second);
				}
			}
		}
	}
	return;
} // end of SMPFunction::AuditSCCPForIndirectTargets()

// part of SCCP processing; propagate const DEFs into Phi USEs and Phi DEFs
// BlockNum            : block whose Phi functions are (re-)evaluated.
// ExecutedEdgeBitSet  : row = dest block, bit col = source block; only Phi USEs
//                       arriving on executed edges participate in the meet.
// SSAWorkList         : receives (block, name+SSA hash) pairs for changed DEFs.
void SMPFunction::EvaluateAllPhiConstants(int BlockNum, const vector<STARSBitSet> &ExecutedEdgeBitSet, list<pair<int, int> > &SSAWorkList) {
	// For all Phi DEFs of the type we are tracking for const values:
	//   If Phi DEF const value is not already the lattice bottom value:
	//     Accumulate const DEF values only for Phi USEs that correspond to incoming
	//      edges that are executed.
	//     If we have a consistent const value, set Phi DEF const value to it; if
	//      we have conflicting const values, set Phi DEF const value to type lattice bottom.
	//     If we set the Phi DEF const value to a new value, then propagate along SSA edges.

	// For all Phi DEFs of the type we are tracking for const values:
	SMPBasicBlock *CurrBlock = this->GetBlockByNum(BlockNum);
	set<SMPPhiFunction, LessPhi>::iterator PhiIter;
	for (PhiIter = CurrBlock->GetFirstPhi(); PhiIter != CurrBlock->GetLastPhi(); ++PhiIter) {
		STARSOpndTypePtr PhiOp = PhiIter->GetAnyOp();
		if (!(PhiOp->IsRegOp()))   // !!!!****!!!! Add stack locations also in safe functions
			continue;
		int DefSSANum = PhiIter->GetDefSSANum();
		int DefHashValue = HashGlobalNameAndSSA(PhiOp, DefSSANum);
	//   If Phi DEF const value is not already the lattice bottom value:
		STARSSCCPMapIter ConstIter = this->FindConstValue(DefHashValue);
		STARSConstantValueType DefConstValType = STARS_CONST_TOP; // default; no entry in map
		if (ConstIter != this->GetLastConstValueIter()) { // found entry in map
			DefConstValType = ConstIter->second.ConstType;
		}
		if (DefConstValType != STARS_CONST_BOTTOM) {
	//     Accumulate const DEF values only for Phi USEs that correspond to incoming
	//      edges that are executed.
			STARS_uval_t ConstUseVal = 1; // placeholder; meaningful only once ConstUseValueSeen is true
			bool ConstUseValueSeen = false; // value seen on executed incoming edge Phi USE
			bool ConstNonValueSeen = false; // one executed incoming edge Phi USE had no value
			list<SMPBasicBlock *>::iterator PredIter;
			std::size_t PredIndex = 0;
			STARSConstantValueType DefFinalConstValType = DefConstValType;
			// Phi USE list is position-aligned with the predecessor list, so
			//  PredIndex selects the USE arriving from each predecessor.
			for (PredIter = CurrBlock->GetFirstPred(); PredIter != CurrBlock->GetLastPred(); ++PredIter, ++PredIndex) {
				int IncomingBlockNum = (*PredIter)->GetNumber();
				bool ExecutedIncomingEdge = ExecutedEdgeBitSet.at(BlockNum).GetBit(IncomingBlockNum);
				if (ExecutedIncomingEdge) {
					int UseSSANum = PhiIter->GetUseSSANum(PredIndex);
					int UseHashValue = HashGlobalNameAndSSA(PhiOp, UseSSANum);
	//     If we have a consistent const value, set Phi DEF const value to it; if
	//      we have conflicting const values, set Phi DEF const value to type lattice bottom.
					ConstIter = this->FindConstValue(UseHashValue);
					STARSConstantValueType UseConstValType = STARS_CONST_TOP; // default; no entry in map
					if (ConstIter != this->GetLastConstValueIter()) { // found entry in map
						UseConstValType = ConstIter->second.ConstType;
					}
					else {
						ConstNonValueSeen = true;
					}
					if (UseConstValType == STARS_CONST_HAS_VALUE) {
						if (ConstUseValueSeen) { // check for consistency of values
							if (ConstUseVal != ConstIter->second.ConstValue) { // inconsistent const values
								DefFinalConstValType = STARS_CONST_BOTTOM;
								break; // no need to see more Phi USEs
							}
						}
						else { // this is the first const value we have seen
							DefFinalConstValType = STARS_CONST_HAS_VALUE; // so far, anyway
							ConstUseValueSeen = true;
							ConstUseVal = ConstIter->second.ConstValue; // only value seen so far
						}
					}
					else if (UseConstValType == STARS_CONST_BOTTOM) {
						// Any BOTTOM value in a USE makes the DEF also become BOTTOM.
						DefFinalConstValType =  STARS_CONST_BOTTOM;
						break; // no need to see more Phi USEs
					}
					// else must be STARS_CONST_TOP, which is a don't-care case
				} // end if (ExecutedIncomingEdge)
			} // end for PredIter iteration through predecessor blocks

			// NOTE: It seems at first glance to be an error to propagate a new constant value
			//  from a Phi USE to the Phi DEF when there were Phi USEs that we ignored because
			//  their incoming edges were not executed. It seems that we are saying that:
			//  EDX20 := phi(EDX19, EDX21) produces EDX20 := 0 just because EDX19 == 0 even though
			//  EDX21 is still the lattice TOP value, having not been determined yet. In fact, this
			//  is the key feature of SCCP. On at least one path, EDX can have the value zero, so a
			//  path through the current block could occur with EDX == 0, and if that causes only one
			//  of the current block's successors to be added to the CFG WorkList, then the other successor
			//  still has a chance to be added later after the value of EDX21 is determined, because
			//  EvaluateAllPhiConstants() will be executed each time we enter the current block from
			//  an entry in the CFG WorkList. So, even if we have a test at the bottom of the block
			//  that determines the successor block based on EDX20 directly:
			//   test edx,edx
			//   jz label2
			//  all we are determining at the present time is that one possible path leads to label2, and
			//  the block at label2 is reachable and should be added to the work list. If EDX21 is later
			//  determined to have a non-zero value, then EvaluateAllPhiConstants() will be called again,
			//  producing the BOTTOM lattice value for EDX20, and both successor blocks of the current block
			//  will be placed on the CFG WorkList. Thus, even though the current invocation seems to lead to
			//  deciding that the branch to label2 is always taken, by using the SSAWorkList to propagate EDX==0
			//  down to the bottom of the block, this is not a final determination.

	//     If we set the Phi DEF const value to a new value, then propagate along SSA edges.
			// NOTE(review): the !ConstNonValueSeen requirement blocks the update when any
			//  executed-edge USE is still TOP, which is stricter than the classic SCCP meet
			//  described in the NOTE above — confirm this restriction is intentional.
			if (!ConstNonValueSeen && (DefFinalConstValType != DefConstValType)) { // const entry is changing
				struct STARS_SCCP_Const_Struct NewConstEntry;
				NewConstEntry.ConstType = DefFinalConstValType;
				NewConstEntry.ConstValue = ConstUseVal;
				if (DefConstValType == STARS_CONST_TOP) { // there was no old map entry; insert new one
					pair<int, struct STARS_SCCP_Const_Struct> NewMapEntry(DefHashValue, NewConstEntry);
					pair<map<int, struct STARS_SCCP_Const_Struct>::iterator, bool> InsertResult = this->ConstantDefs.insert(NewMapEntry);
					assert(InsertResult.second);
				}
				else { // old map entry needs to be changed
					this->ConstantDefs[DefHashValue] = NewConstEntry;
				}
				// Propagate along SSA edges.
				pair<int, int> SSAEdge(BlockNum, DefHashValue);
				SSAWorkList.push_back(SSAEdge);
			} // end if entry is changing
		} // end if previous DEF value was not already BOTTOM
	} // end for all Phi functions
	return;
} // end of SMPFunction::EvaluateAllPhiConstants()

// Do we not care if DEF underflowed, due to how it is used?
bool SMPFunction::IsBenignUnderflowDEF(const STARSOpndTypePtr &DefOp, int DefSSANum, STARS_ea_t DefAddr, int &IdiomCode) {
	bool benign = false;
	list<SMPInstr *>::iterator InstIter;
	set<DefOrUse, LessDefUse>::iterator DefIter, UseIter;
	int UseSSANum;
	SMPOperandType DefType;

	// We are looking to suppress overflow and underflow warnings on the following
	//  code sequence: PTR1-PTR2+1 gets a loop invariant code motion optimization
	//  that pulls  temp := 1-PTR2 out of the loop, and leaves temp2 := PTR1+temp
	//  inside the loop. The hoisted subtraction could underflow, and the addition
	//  that is not hoisted could overflow. The net effect of these two instructions
	//  is benign, however, so we want to suppress underflow and overflow checks on
	//  both of them, but only if we can match the pair of instructions.
	
	// We know that DefOp/DefAddr/DefSSANum refer to a subtraction instruction that
	//  produces a NEGATEDPTR result. We only need to find the paired addition instruction
	//  that USEs the same SSA name to produce a PTROFFSET result to prove that we have
	//  a case of benign underflow and overflow. If we find such a pair, we will mark
	//  both of their DEF results as benign overflows to suppress overflow checks.

	// PAINFUL: Linear search of instructions. Need to address this in the future.
	//  Perhaps we should have a map of UseHashValue to InstAddr, but that is more
	//  memory consumption. Sure would be useful, though.
	for (InstIter = this->Instrs.begin(); InstIter != this->Instrs.end(); ++InstIter) {
		SMPInstr *CurrInst = (*InstIter);
		UseIter = CurrInst->FindUse(DefOp);
		if (UseIter == CurrInst->GetLastUse()) {
			continue;
		}
		UseSSANum = UseIter->GetSSANum();
		if (UseSSANum == DefSSANum) {
			// Only remaining question: Do we produce a PTROFFSET in CurrInst? (If we do,
			//  that implies we had an addition, so we don't need to check that.)
			DefIter = CurrInst->GetFirstNonFlagsDef();
			if (DefIter != CurrInst->GetLastDef()) {
				DefType = DefIter->GetType();
				// NOTE: Make this more general. What if we just move the NEGATEDPTR into a register
				//  and then the next instruction, with different SSA name, produces the PTROFFSET?
				//  !!!!!*****!!!!!
				if (IsEqType(DefType, PTROFFSET)) {
					// Found a pair. Mark both DEFs as benign and return true.
					benign = true;
					IdiomCode = 4;
					// Note that we have two possibilities for the addition. The NEGATEDPTR could be
					//  both the DEF and a USE, e.g. add negptr,ptr1; or the NEGATEDPTR could be
					//  just a USE, e.g. add reg,negptr, so that reg is overwritten and becomes a
					//  PTROFFSET. It really does not matter. The point is that we want to ignore
					//  overflow on this addition, and also on the subtraction that produced the
					//  NEGATEDPTR, so we mark the DEF in each instruction as benignly overflowing.
					STARSOpndTypePtr UseInstDefOp = DefIter->GetOp();
					DefIter = CurrInst->SetDefNoOverflow(UseInstDefOp, true);
					SMPInstr *DefInst = this->GetInstFromAddr(DefAddr);
					(void) DefInst->SetDefNoOverflow(DefOp, true);
					break;
				}
			}
		}
	}

	return benign;
} // end of SMPFunction::IsBenignUnderflowDEF()

bool SMPFunction::HasIntErrorCallSink(const STARSOpndTypePtr &DefOp, int DefSSANum, STARS_ea_t DefAddr, std::string &SinkString, bool &FoundAnyCall) {
	this->ResetProcessedBlocks(); // prepare for recursion through blocks
	SinkString.clear();
	SMPBasicBlock *CurrBlock = this->GetBlockFromInstAddr(DefAddr);
	assert(CurrBlock != NULL);

	bool FoundSink = CurrBlock->IsCriticalSink(DefOp, DefSSANum, SinkString, FoundAnyCall);

	return FoundSink;
} // end of SMPFunction::HasIntErrorCallSink()

// Recompute LVA and SSA and all dependent data structures now that unreachable blocks have been removed.
void SMPFunction::RecomputeSSA(void) {
	this->FreeSSAMemory(); // release prior SSA data structures before rebuilding
	// Clear all loop-analysis caches; block removal invalidates them.
	this->LoopCount = 0;
	this->LoopTestBlocksByLoopNum.clear();
	this->LoopTypesByLoopNum.clear();
	this->FuncLoopsByBlock.clear();
	this->FuncBlocksByLoop.clear();
	this->GlobalDefAddrBySSA.clear(); // SSA-number-to-DEF-address map is stale
	this->LoopInductionVars.clear();
#if 1 // need to debug
	this->RPONumberBlocks(); // Re-do the RPO numbering now that a block is removed.
#endif
	this->CallerSavedLocalRegsBitmap.reset(); // reset all bits to zero
	this->LiveVariableAnalysis(true); // re-run LVA before renumbering
	this->ComputeSSA(); // rebuild SSA form from the updated CFG
	return;
} // end of SMPFunction::RecomputeSSA()

// Compute SSA form data structures across the function.
void SMPFunction::ComputeSSA(void) {
	bool DebugFlag = false;
	bool DumpFlag = false;
#if SMP_DEBUG_DATAFLOW
	DumpFlag |= (0 == strcmp("uw_frame_state_for", this->GetFuncName()));
	DebugFlag |= (0 == strcmp("uw_frame_state_for", this->GetFuncName()));
#endif

#if 1
	if (DumpFlag)
		this->Dump();
#endif
	if (DebugFlag) SMP_msg("Computing global names.\n");
	this->ComputeGlobalNames();
	if (DebugFlag) SMP_msg("Computing IDoms.\n");
	this->ComputeIDoms();
	if (DebugFlag) SMP_msg("Computing Dom frontiers.\n");
	this->ComputeDomFrontiers();
	if (DebugFlag) SMP_msg("Computing blocks defined in.\n");
	this->ComputeBlocksDefinedIn();

	if (DebugFlag) SMP_msg("Inserting Phi functions.\n");
	this->InsertPhiFunctions();
	if (DebugFlag) SMP_msg("Building dominator tree.\n");
	this->BuildDominatorTree();
	this->HasReducibleCFG = this->TestCFGReducibility();
	if (this->HasReducibleControlFlow()) {
		this->HasStructuredCFG = (!this->HasUnresolvedIndirectJumps());
		if (!this->HasStructuredControlFlow()) {
			SMP_msg("ERROR: SPARK: Unstructured CFG due to unresolved indirect jumps in %s\n", this->GetFuncName());
		}
	}
	if (DebugFlag) SMP_msg("Computing SSA renumbering.\n");
	this->SSARenumber();
	list<SMPBasicBlock *>::iterator BlockIter;
	SMPBasicBlock *CurrBlock;
	int LoopHeadBlockNum = SMP_BLOCKNUM_UNINIT;
	int FollowBlockNum = SMP_BLOCKNUM_UNINIT;
	for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
		CurrBlock = (*BlockIter);
		this->FindLoopHeadsAndTails(CurrBlock);

		if (DumpFlag) CurrBlock->Dump();

		if (DebugFlag) SMP_msg("Computing local names.\n");
		CurrBlock->SetLocalNames();
		if (DebugFlag) SMP_msg("Computing local SSA renumbering.\n");
		CurrBlock->SSALocalRenumber();
		if (DumpFlag) CurrBlock->Dump();

		bool SingleExprBlock = CurrBlock->FindSingleExpression();
		if (SingleExprBlock) {
			CurrBlock->SetSingleExpression();
		}
	}
	this->HasGoodSSA = true;

	// We have to mark dead regs after SetLocalNames() has run for all blocks
	//  so that the CallerSavedLocalRegs bitmap is complete.
	for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
		if (DebugFlag) SMP_msg("Marking dead registers.\n");
		CurrBlock = (*BlockIter);
		CurrBlock->MarkDeadRegs();
	}

	// Analyze system calls, if any.
	if (this->HasSystemCalls()) {
		this->AnalyzeSystemCalls();
	}

#if STARS_BUILD_LOOP_BITSET
	// Now that we know how many loops we have, we can allocate the loops data structures.
	assert(this->BlockCount == ((int) this->Blocks.size()));
	this->FuncLoopsByBlock.resize(this->BlockCount);
	for (std::size_t BlockIndex = 0; ((int) BlockIndex) < this->BlockCount; ++BlockIndex) {
		this->FuncLoopsByBlock.at(BlockIndex).AllocateBits(this->LoopCount);
	}

	this->FuncBlocksByLoop.resize(this->LoopCount);
	for (size_t LoopIndex = 0; LoopIndex < this->LoopCount; ++LoopIndex) {
		this->FuncBlocksByLoop.at(LoopIndex).AllocateBits(this->BlockCount);
	}
	this->LoopTypesByLoopNum.resize(this->LoopCount);
	this->LoopTestBlocksByLoopNum.resize(this->LoopCount);

	if (this->LoopCount > 0) {
		this->DetectLoops();
		if (this->HasStructuredControlFlow()) {
			this->DetectLoopInvariantDEFs();
			if (global_STARS_program->ShouldSTARSPerformDeepLoopAnalyses()) {
				this->DetectLoopInductionVars();
			}
		}
	}
	bool FoundSafeWrites = this->AnalyzeMemWriteSafety();
	if (FoundSafeWrites) {
		SMP_msg("INFO: Found safe indirect memory writes in func at %p\n", this->GetFirstFuncAddr());
	}
#endif

#if 0  // Need less conservative USEs on call insts to get good uninit var results
	this->DetectUninitializedVars();
#endif

#if SMP_DEBUG_DATAFLOW
	if (DumpFlag)
		this->Dump();
#endif

	// Once SSA numbers have been set into all DEFs, USES, and DU-chains, then
	//  the SSA numbering data structures will no longer be used and can be
	//  de-allocated.
	this->FreeSSAMemory();
	return;
} // end of SMPFunction::ComputeSSA()

// Detect loop head and/or tail block for loops and record in data structures.
// A back edge exists from CurrBlock to a successor that dominates CurrBlock;
//  CurrBlock is then a loop tail and the successor is a loop header. Newly found
//  headers are appended to LoopHeadBlockNumbers (incrementing LoopCount), with a
//  placeholder follow-node entry that DetectLoops() fills in later.
void SMPFunction::FindLoopHeadsAndTails(SMPBasicBlock *CurrBlock) {
	bool LoopTail = false;
	int FollowBlockNum = SMP_BLOCKNUM_UNINIT; // placeholder pushed into LoopFollowNodes
	int HeaderBlockNum = SMP_BLOCKNUM_UNINIT;
	list<SMPBasicBlock *>::iterator SuccIter;
	for (SuccIter = CurrBlock->GetFirstSucc(); SuccIter != CurrBlock->GetLastSucc(); ++SuccIter) {
		int SuccBlockNum = (*SuccIter)->GetNumber();
		int OuterLoopHeadBlockNum = SMP_BLOCKNUM_UNINIT;
		// Successor dominating CurrBlock => this edge is a back edge.
		if (this->DoesBlockDominateBlock(SuccBlockNum, CurrBlock->GetNumber())) {
			CurrBlock->SetLoopTailBlock();
			(*SuccIter)->SetLoopHeaderBlock();
			HeaderBlockNum = SuccBlockNum;
			if (!LoopTail) {
				LoopTail = true;
				CurrBlock->SetLoopHeaderBlockNumberForTailBlock(SuccBlockNum);
			}
			else {
				// Second back edge from the same tail block: two nested loops share this tail.
				CurrBlock->SetLoopDoubleTailBlock();
				SMP_msg("INFO: LoopDoubleTailBlock: block %d in func %s\n",
					CurrBlock->GetNumber(), this->GetFuncName());
				// Which loop header block is outermost? It dominates the other header.
				if (this->DoesBlockDominateBlock(SuccBlockNum, CurrBlock->GetLoopHeaderNumber())) {
					CurrBlock->SetOuterLoopHeaderBlockNumberForDoubleTailBlock(SuccBlockNum);
				}
				else {
					CurrBlock->SetOuterLoopHeaderBlockNumberForDoubleTailBlock(CurrBlock->GetLoopHeaderNumber());
					CurrBlock->SetLoopHeaderBlockNumberForTailBlock(SuccBlockNum);
				}
			}

			size_t LoopIndex;
			if (this->IsBlockNumLoopHeader(HeaderBlockNum, LoopIndex)) {
				// Multiple branches of an if-then-elsif-else can return to the top of a loop,
				//  so we can encounter loop tail blocks multiple times for a loop. We found
				//  the LoopHeadBlockNum already in our list, so this must be one of these cases.
				// See if we can provide a better follow node number from the current analysis.
				// FIXED: LoopIndex is unsigned, so the old (0 <= LoopIndex) test was a tautology.
				assert(LoopIndex < this->LoopFollowNodes.size());
			}
			else {
				this->LoopHeadBlockNumbers.push_back(HeaderBlockNum);
				this->LoopFollowNodes.push_back(FollowBlockNum);
				++this->LoopCount;
				assert(this->LoopHeadBlockNumbers.size() == this->LoopCount);
				// If this is a double tail block, we need to arrange Loop N == inner, loop N-1 == outer.
				if (CurrBlock->IsDoubleLoopTailBlock()) {
					OuterLoopHeadBlockNum = CurrBlock->GetOuterLoopHeaderNumberForDoubleTailBlock();
					int InnerLoopHeadBlockNum = CurrBlock->GetLoopHeaderNumber();
					if (OuterLoopHeadBlockNum != this->LoopHeadBlockNumbers[this->LoopCount - 1]) {
						assert(InnerLoopHeadBlockNum == this->LoopHeadBlockNumbers[this->LoopCount - 1]);
#if 0 // incorrect, unnecessary?
						this->LoopHeadBlockNumbers[this->LoopCount - 2] = InnerLoopHeadBlockNum;
						this->LoopHeadBlockNumbers[this->LoopCount - 1] = OuterLoopHeadBlockNum;
#endif
					}
				}
			}

		}
	}

	return;
} // end of SMPFunction::FindLoopHeadsAndTails()

// Is BlockNum already in this->LoopHeadBlockNumbers? If so, put index in LoopNum.
// Returns true when found; LoopNum is left untouched when not found.
bool SMPFunction::IsBlockNumLoopHeader(const int BlockNum, std::size_t &LoopNum) const {
	const std::size_t HeaderCount = this->LoopHeadBlockNumbers.size();
	// Linear search; header list is short in practice.
	for (std::size_t Index = 0; Index < HeaderCount; ++Index) {
		if (this->LoopHeadBlockNumbers[Index] == BlockNum) {
			LoopNum = Index;
			return true;
		}
	}
	return false;
} // end of SMPFunction::IsBlockNumLoopHeader()

// populate FuncLoopsByBlock and FuncBlocksByLoop data structures for DetectLoops().
// Performs a backwards worklist traversal of the CFG from the loop tail block
//  (BlockIter) up to the loop header (HeadBlockNum), marking every block reached
//  as a member of loop LoopNumber. Also records whether any block in the loop
//  reads or writes memory (static or indirect) into LoopReadsMemory/LoopWritesMemory.
// DoubleTailFollowBlockNum is an out-param updated only for double tail blocks
//  (a tail shared by two nested loops); see the long comment below.
// Precondition (implied by the IsProcessed() tests): caller has reset the
//  per-block processed flags before invoking this method.
void SMPFunction::FindLoopBlocksFromTailToHeader(const size_t LoopNumber, const int HeadBlockNum, list<SMPBasicBlock *>::iterator BlockIter, int &DoubleTailFollowBlockNum) {
	// Worklist of block iterators to visit, seeded with the tail block.
	list<list<SMPBasicBlock *>::iterator> BlockWorkList;
	BlockWorkList.push_back(BlockIter);
	SMPBasicBlock *WorkBlock;
	SMPBasicBlock *PredBlock;
	list<SMPBasicBlock *>::iterator WorkIter;
	list<SMPBasicBlock *>::iterator PredIter;
	bool WritesMemoryStaticOrIndirect = false;
	bool ReadsMemoryStaticOrIndirect = false;
	bool DoubleTailFlag = (*BlockIter)->IsDoubleLoopTailBlock();
	do {
		WorkIter = BlockWorkList.front();
		BlockWorkList.pop_front();
		WorkBlock = (*WorkIter);
		// Accumulate memory-access summary bits for the whole loop.
		if (WorkBlock->HasIndirectMemWrite() || WorkBlock->HasStaticMemWrite()) {
			WritesMemoryStaticOrIndirect = true;
		}
		if (WorkBlock->HasIndirectMemRead() || WorkBlock->HasStaticMemRead()) {
			ReadsMemoryStaticOrIndirect = true;
		}
		int WorkBlockNum = WorkBlock->GetNumber();
		assert(WorkBlockNum != SMP_BLOCKNUM_UNINIT);
		assert(WorkBlockNum < ((int) this->Blocks.size()));

		// Re-check processed flag: a block can be queued more than once before
		//  its first visit, so duplicates are filtered here.
		if (!(WorkBlock->IsProcessed())) {
			assert(LoopNumber < this->LoopCount);
			// Record loop membership in both directions (block->loops, loop->blocks).
			this->FuncLoopsByBlock[(std::size_t) WorkBlockNum].SetBit(LoopNumber);
			this->FuncBlocksByLoop[LoopNumber].SetBit((size_t)WorkBlockNum);
			WorkBlock->SetProcessed(true);

			// Add unprocessed predecessors to the work list until we reach the loop head.
			if (WorkBlockNum != HeadBlockNum) {
				for (PredIter = WorkBlock->GetFirstPred(); PredIter != WorkBlock->GetLastPred(); ++PredIter) {
					PredBlock = (*PredIter);
					bool AlreadyProcessed = PredBlock->IsProcessed();
					if (!AlreadyProcessed) {
						BlockWorkList.push_back(PredIter);
					}
				}
			}
		}
	} while (!BlockWorkList.empty());
	// Publish the memory-access summary for this loop (indexed by push order).
	this->LoopWritesMemory.push_back(WritesMemoryStaticOrIndirect);
	this->LoopReadsMemory.push_back(ReadsMemoryStaticOrIndirect);

	if (DoubleTailFlag) {
		// Out of the two successors to a double tail block,
		//  one successor will be the head of the inner loop
		//  and the other successor will be the head of the outer loop.
		// Therefore, the only valid FollowBlockNum for both loops is
		//  the loop exit successor for the outer loop.
		// However, the outer loop has two cases:
		//  1. It is a top-testing loop, in which case we only need to find
		//     the successor to the outer loop header block that is outside the
		//     outer loop.
		//  2. It is a middle-testing loop, so we must find some other FollowBlockNum
		//     for the outer loop and use it for the inner loop, also.
		// Unfortunately, we are in the middle of the process of distinguishing
		//  top-testing from middle-testing loops. But we can simplify both cases
		//  into one with this observation: As we trace from the double tail block
		//  back up to the outer loop header, any block successor that is not in the
		//  outer loop is the FollowBlockNum for the outer loop, and hence for the
		//  inner loop, also. When we are processing the inner loop, we will mistakenly
		//  identify the header of the outer loop as the FollowBlockNum. Immediately
		//  after doing so, this method will be called with the outer loop number, and
		//  the correct FollowBlockNum will be written. The caller of this method
		//  will record this second FollowBlockNum for both loops.
		list<size_t> BlockList;
		this->BuildLoopBlockList(LoopNumber, BlockList);
		for (size_t BlkNum : BlockList) {
			SMPBasicBlock *LoopBlock = this->GetBlockByNum(BlkNum);
			// First successor found outside the loop is the follow block.
			list<SMPBasicBlock *>::const_iterator SuccIter = LoopBlock->GetSuccNotInLoop(LoopNumber);
			if (SuccIter != LoopBlock->GetLastConstSucc()) {
				DoubleTailFollowBlockNum = (*SuccIter)->GetNumber();
				break;
			}
		}
	}

	return;
} // end of SMPFunction::FindLoopBlocksFromTailToHeader()

// Detect which blocks are in which loops and populate FuncLoopsByBlock and FuncBlocksByLoop data structures.
// Phase 1: for each loop tail block, trace backwards to the loop header, marking
//  loop membership (FindLoopBlocksFromTailToHeader). Double tail blocks (shared by
//  two nested loops) are traced twice, once per loop.
// Phase 2: classify each loop (top-testing / bottom-testing / middle-testing or
//  infinite) and determine its follow (exit target) block via ClassifyLoop().
void SMPFunction::DetectLoops(void) {
	list<SMPBasicBlock *>::iterator BlockIter;
	map<int, int> DoubleTailBlockNumToFollowBlockMap; // keep track of tricky FollowBlockNum
		// analysis results for DoubleTailBlocks

	std::size_t LoopNumber = 0;
	for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
		SMPBasicBlock *CurrBlock = (*BlockIter);
		if (CurrBlock->IsLoopTailBlock()) {
			// For each loop tail block, get its loop header block number (the target
			//  block for its back edge). Then traverse the CFG upwards, recording all
			//  of the basic block numbers that lie between the loop tail and loop head,
			//  along with the tail and head.
			// Along the way, record whether the loop writes to static memory or has
			//  an indirect memory write.
			this->ResetProcessedBlocks();
			int TailBlockNum = CurrBlock->GetNumber();
			int HeadBlockNum = CurrBlock->GetLoopHeaderNumber();
			assert((TailBlockNum != SMP_BLOCKNUM_UNINIT) && (HeadBlockNum != SMP_BLOCKNUM_UNINIT));
			// Map the header block number back to its loop index.
			bool FoundHeader = this->IsBlockNumLoopHeader(HeadBlockNum, LoopNumber);
			assert(FoundHeader);
			int DoubleTailFollowBlockNum = SMP_BLOCKNUM_UNINIT;
			this->FindLoopBlocksFromTailToHeader(LoopNumber, HeadBlockNum, BlockIter, DoubleTailFollowBlockNum);
			// If double tail block, trace outer block.
			if (CurrBlock->IsDoubleLoopTailBlock()) {
				HeadBlockNum = CurrBlock->GetOuterLoopHeaderNumberForDoubleTailBlock();
				this->ResetProcessedBlocks();
				// Get new LoopNumber.
				bool FoundHeader = this->IsBlockNumLoopHeader(HeadBlockNum, LoopNumber);
				assert(FoundHeader);
				this->FindLoopBlocksFromTailToHeader(LoopNumber, HeadBlockNum, BlockIter, DoubleTailFollowBlockNum);
				// Remember the follow block computed on the outer-loop trace for Phase 2.
				DoubleTailBlockNumToFollowBlockMap[TailBlockNum] = DoubleTailFollowBlockNum;
			}

		} // end if tail block
	} // end for all blocks

	// Now, classify the loops as top-testing, bottom-testing, or middle-testing, and
	//  find the follow block number for the loops.
	for (size_t BlockNum = 0; BlockNum < this->GetNumBlocks(); ++BlockNum) {
		SMPBasicBlock *CurrBlock = this->GetBlockByNum(BlockNum);
		if (CurrBlock->IsLoopTailBlock()) {
			int TailBlockNum = CurrBlock->GetNumber();
			int HeadBlockNum = CurrBlock->GetLoopHeaderNumber();
			assert((TailBlockNum != SMP_BLOCKNUM_UNINIT) && (HeadBlockNum != SMP_BLOCKNUM_UNINIT));
			bool FoundHeader = this->IsBlockNumLoopHeader(HeadBlockNum, LoopNumber);
			assert(FoundHeader);
			// Examine the head and tail blocks to determine the loop type. CurrBlock is the tail block.
			SMPBasicBlock *HeadBlock = this->GetBlockByNum((std::size_t) HeadBlockNum);
			// 1996 Cristina Cifuentes paper, "Structuring Decompiled Graphs," has a simple formula
			//  for classifying loops as top-testing, bottom-testing, or infinite (with possible middle-testing exit).
			//  I have modified it here to account for possible bottom-testing loop that begins with an if-then, because
			//  we do not create empty loop header blocks for loops (we use existing blocks with actual instructions).
			//  TOP-TESTING: Header has more than one successor, with at least one successor outside the loop, while
			//    the tail block has only one successor (the header block).
			//  BOTTOM-TESTING: Header has all successors in the loop (usually just one, but might be if-then-else, etc.),
			//    while the tail block has more than one successor, with at least one outside the loop.
			//  INFINITE or MIDDLE_TESTING: Header has all successors in the loop, tail block has one successor (the header block).
			//  As a truth table, there are logically four possibilities:
			//
			//     HEADER BLOCK                        TAIL BLOCK                         LOOP TYPE
			//     All successors in the loop          Header is only successsor          Infinite or middle-testing
			//     All successors in the loop          >=1 successors outside the loop    Bottom-testing
			//     >=1 successors outside the loop     Header is only successor           Top-testing
			//     >=1 successors outside the loop     >=1 successors outside the loop    Middle-testing with conditional exits at top and bottom
			//
			//  In Ada (to which we might decompile), these four loops look like:
			//    MIDDLE-TESTING or INFINITE:
			//    loop
			//      ...
			//      exit when condition;  [absence of exits would make this an infinite loop]
			//      ...
			//    end loop;
			//
			//    MIDDLE-TESTING with conditional exits at top and bottom:
			//     loop
			//       exit when condition_1;
			//       ...
			//       exit when condition_2;
			//     end loop;
			//
			//     TOP-TESTING:
			//     while condition loop
			//       ...
			//     end loop;
			//
			//     BOTTOM-TESTING:
			//     loop
			//       ...
			//       exit when condition;
			//     end loop;
			//
			//     If the top-testing loop has a finite, constant number of iterations, we could use the for-loop instead of the while-loop.
			bool DoubleTailFlag = CurrBlock->IsDoubleLoopTailBlock();
			int HeaderExitFollowNum = SMP_BLOCKNUM_UNINIT, TailExitFollowNum = SMP_BLOCKNUM_UNINIT;
			bool HeaderBlockExitsLoop = this->DoesBlockExitLoop(LoopNumber, HeadBlock, HeaderExitFollowNum);
			bool TailBlockExitsLoop = this->DoesBlockExitLoop(LoopNumber, CurrBlock, TailExitFollowNum);
			if (DoubleTailFlag) {
				// FollowBlockNum should be in our local map.
				map<int, int>::const_iterator MapIter = DoubleTailBlockNumToFollowBlockMap.find(TailBlockNum);
				assert(MapIter != DoubleTailBlockNumToFollowBlockMap.cend());
				TailExitFollowNum = (*MapIter).second;
				HeaderExitFollowNum = TailExitFollowNum; // simplify code below
			}

			this->ClassifyLoop(LoopNumber, HeaderExitFollowNum, TailExitFollowNum, CurrBlock, HeadBlock, HeaderBlockExitsLoop, TailBlockExitsLoop);
			if (DoubleTailFlag) {
				// Classify the outer loop.
				HeadBlockNum = CurrBlock->GetOuterLoopHeaderNumberForDoubleTailBlock();
				HeadBlock = this->GetBlockByNum((std::size_t) HeadBlockNum);
				bool FoundHeader = this->IsBlockNumLoopHeader(HeadBlockNum, LoopNumber);
				assert(FoundHeader);
				this->ClassifyLoop(LoopNumber, HeaderExitFollowNum, TailExitFollowNum, CurrBlock, HeadBlock, HeaderBlockExitsLoop, TailBlockExitsLoop);
			}
		} // end if tail block
	} // end for all blocks

	return;
} // end of SMPFunction::DetectLoops()

// Classify loop LoopNumber as top-testing, bottom-testing, or middle-testing/infinite,
//  recording the loop type, its test block, and its follow (exit target) block.
// CurrBlock is the loop tail block; HeadBlock is the loop header block.
// HeaderExitFollowNum/TailExitFollowNum: follow block numbers when exiting from the
//  header/tail block; HeaderBlockExitsLoop/TailBlockExitsLoop say whether each can exit.
void SMPFunction::ClassifyLoop(size_t LoopNumber, int HeaderExitFollowNum, int TailExitFollowNum, SMPBasicBlock *CurrBlock, SMPBasicBlock *HeadBlock, bool HeaderBlockExitsLoop, bool TailBlockExitsLoop) {
	// SPARK translation requires structured control flow; an indirect jump that
	//  exits the loop breaks it.
	// FIXED: LoopNumber is size_t but was passed to %d conversions (undefined
	//  behavior); cast to int. Addresses cast to unsigned long long to match %llx.
	if (HeaderBlockExitsLoop && HeadBlock->HasIndirectJump()) {
		this->HasStructuredCFG = false;
		SMP_msg("ERROR: SPARK: Indirect jump head loop exit at %llx for loop %d in %s\n",
			(unsigned long long) CurrBlock->GetLastAddr(), (int) LoopNumber, this->GetFuncName());
	}
	else if (TailBlockExitsLoop && CurrBlock->HasIndirectJump()) {
		this->HasStructuredCFG = false;
		SMP_msg("ERROR: SPARK: Indirect jump tail loop exit at %llx for loop %d in %s\n",
			(unsigned long long) CurrBlock->GetLastAddr(), (int) LoopNumber, this->GetFuncName());
	}

	if (TailBlockExitsLoop) {
		if (HeaderBlockExitsLoop) {
			// Conditional exits at top and bottom of loop.
			this->LoopTypesByLoopNum[LoopNumber] = STARS_BOTTOM_TESTING_LOOP;
			if (TailExitFollowNum != HeaderExitFollowNum) {
				this->HasStructuredCFG = false; // inconsistent follow nodes, e.g. multi-level loop exit
				SMP_msg("ERROR: SPARK: Conflicting head and tail loop follow block nums for loop %d : %d and %d in %s\n",
					(int) LoopNumber, HeaderExitFollowNum, TailExitFollowNum, this->GetFuncName());
			}
		}
		else {
			// Normal bottom-testing loop.
			this->LoopTypesByLoopNum[LoopNumber] = STARS_BOTTOM_TESTING_LOOP;
		}
		this->LoopTestBlocksByLoopNum[LoopNumber] = CurrBlock->GetNumber();
		this->UpdateLoopFollowBlockNum(HeadBlock->GetNumber(), TailExitFollowNum);
	}
	else {
		if (HeaderBlockExitsLoop) {
			// Normal top-testing while or for loop.
			this->LoopTypesByLoopNum[LoopNumber] = STARS_TOP_TESTING_LOOP;
			this->LoopTestBlocksByLoopNum[LoopNumber] = HeadBlock->GetNumber();
			this->UpdateLoopFollowBlockNum(HeadBlock->GetNumber(), HeaderExitFollowNum);
		}
		else {
			// Infinite or middle-testing loop; no single test block to record.
			this->LoopTypesByLoopNum[LoopNumber] = STARS_INFINITE_OR_MIDDLE_TESTING_LOOP;
			this->LoopTestBlocksByLoopNum[LoopNumber] = SMP_BLOCKNUM_UNINIT;
		}
	}
	return;
} // end of SMPFunction::ClassifyLoop()

// return true if block can exit the loop.
bool SMPFunction::DoesBlockExitLoop(std::size_t LoopNumber, SMPBasicBlock *LoopBlock, int &FollowBlockNum) {
	bool FoundExitSuccessor = false;

	for (list<SMPBasicBlock *>::iterator SuccIter = LoopBlock->GetFirstSucc(); SuccIter != LoopBlock->GetLastSucc(); ++SuccIter) {
		SMPBasicBlock *SuccBlock = (*SuccIter);
		int BlockNum = SuccBlock->GetNumber();
		if (!(this->IsBlockInLoop(BlockNum, LoopNumber))) {
			if (!LoopBlock->IsDoubleLoopTailBlock()) { // normal case
				FoundExitSuccessor = true;
				FollowBlockNum = BlockNum;
				break;
			}
			else { // Double tail block
				FoundExitSuccessor = true;
				FollowBlockNum = SMP_BLOCKNUM_UNINIT; // will be ignored by DetectLoops() caller
				break;
			}
		}
	}

	return FoundExitSuccessor;
} // end of SMPFunction::DoesBlockExitLoop()


// Collect the set of loop-invariant DEFs (keyed by DEF instruction address) for
//  each loop, then mark a DEF as loop-invariant only when it is invariant for
//  EVERY loop that contains its block.
// A DEF is invariant for a loop when every USE that feeds it is defined outside
//  the loop (or is an immediate); memory USEs are conservatively treated as
//  variant except for direct stack accesses in SPARK translation mode.
void SMPFunction::DetectLoopInvariantDEFs(void) {
	bool UseFP = this->UsesFramePointer();
	for (size_t LoopIndex = 0; LoopIndex < this->LoopCount; ++LoopIndex) {
		set<STARS_ea_t> DummySet;
		this->LoopInvariantDEFs.push_back(DummySet); // allocate set in LoopInvariantDEFs vector

		list<size_t> BlockList;
		this->BuildLoopBlockList(LoopIndex, BlockList);

		assert(LoopIndex < ((size_t) this->LoopHeadBlockNumbers.size()));
		int HeadBlockNum = this->LoopHeadBlockNumbers[LoopIndex];

		list<size_t>::const_iterator BlockNumIter;
		for (BlockNumIter = BlockList.cbegin(); BlockNumIter != BlockList.cend(); ++BlockNumIter) {
			size_t BlockNum = (*BlockNumIter);
			SMPBasicBlock *CurrBlock = this->GetBlockByNum(BlockNum);

			for (vector<SMPInstr *>::iterator InstIter = CurrBlock->GetFirstInst(); InstIter != CurrBlock->GetLastInst(); ++InstIter) {
				SMPInstr *CurrInst = (*InstIter);
				set<DefOrUse, LessDefUse>::iterator DefIter = CurrInst->GetFirstNonFlagsDef();
				if (DefIter == CurrInst->GetLastDef())
					continue;  // only the flags were defined
				STARSOpndTypePtr DefOp = DefIter->GetOp();
				// Only care about non-control-flow statements.
				if ((DEFAULT == CurrInst->GetDataFlowType()) && CurrInst->IsAnalyzeable() && MDIsDataFlowOpnd(DefOp, UseFP)) {
					STARS_ea_t DefAddr = CurrInst->GetAddr();
					// Iterate through USEs and find variant/invariant names.
					bool VariantUseFound = false;
					// Examine all USEs. A DEF is only loop invariant if the USEs that helped define it are also loop invariant.
					for (set<DefOrUse, LessDefUse>::iterator UseIter = CurrInst->GetFirstUse(); UseIter != CurrInst->GetLastUse(); ++UseIter) {
						STARSOpndTypePtr UseOp = UseIter->GetOp();
						bool DirectStackAccess = MDIsDirectStackAccessOpnd(UseOp, UseFP);
						bool UnanalyzableUse;
						if (global_STARS_program->ShouldSTARSTranslateToSPARKAda()) {
							// Include direct stack accesses for SPARK analyses
							UnanalyzableUse = (UseOp->IsMemOp() && (!DirectStackAccess))
								|| (UseOp->GetReg() == STARS_x86_R_ip); // instruction pointer a.k.a. program counter
						}
						else {
							// Assume memory operands are loop variant until we have full alias analysis.
							UnanalyzableUse = UseOp->IsMemOp() || (UseOp->GetReg() == STARS_x86_R_ip);
						}
						if (UnanalyzableUse) {
							VariantUseFound = true;
							break;
						}
						else if (UseOp->IsImmedOp() || UseOp->IsVoidOp()) {
							; // immediates are always loop-invariant
						}
						else if (UseOp->MDIsSpecialRegOpType()) {
							// Probably should do data flow analysis on these just like regular regs in the future. !!!!****!!!!
							VariantUseFound = true;
							break;
						}
						else if (UseOp->IsRegOp() || DirectStackAccess) {
							// Find out if DEF is in a Phi function for the loop header, or otherwise outside the loop.
							bool LocalName = CurrBlock->IsLocalName(UseOp);
							// Invariant tests:
							// 1. If LocalName, UseOp is defined in the loop: VARIANT. For GlobalName, go to step 2.
							// 2. Find DEF of this global USE.
							//  a. If DEF is inside the loop, including internal block Phi functions, VARIANT.
							//  b. If DEF is outside the loop, this USE is loop-invariant, continue to analyze remaining USEs.
							//  c. If DEF is in loop header block Phi function, check all Phi USEs. If any Phi USE for
							//      the DEF of UseOp is inside the loop, then UseOp is VARIANT, else continue to remaining USEs.
							int UseSSANum = UseIter->GetSSANum();
							STARS_ea_t UseDefAddr = CurrBlock->GetDefAddrFromUseAddr(UseOp, DefAddr, UseSSANum, LocalName);
							bool UseDefInPhiFunction = STARS_IsBlockNumPseudoID(UseDefAddr);
							if (UseDefInPhiFunction) {// found in Phi DEF; DefAddr encodes block #
								size_t UseDefBlockNum = STARS_GetBlockNumFromPseudoID(UseDefAddr);
								// Is UseDefBlockNum inside our loop?
								if (this->IsBlockInLoop(UseDefBlockNum, LoopIndex)) {
									// Phi DEF is inside loop, but if it is in the loop header block, it is
									//  still possible that the Phi DEF is loop-invariant in the special case
									//  where the loop header block is the join point for a previous if-then-else
									//  and the Phi function is joining loop-invariant values, as opposed to the
									//  common case in which a loop header block Phi function joins an incoming value
									//  to a loop-variant value. So, we must examine the loop variance of the Phi USEs
									//  only if UseDefBlockNum is the loop header block number.
									if (HeadBlockNum != ((int) UseDefBlockNum)) {
										VariantUseFound = true;
										break;
									}
									else { // header block Phi function; must examine all Phi USEs
										SMPBasicBlock *PhiBlock = this->GetBlockByNum(UseDefBlockNum);
										set<SMPPhiFunction, LessPhi>::iterator PhiIter = PhiBlock->FindPhi(UseOp);
										assert(PhiIter != PhiBlock->GetLastPhi());
										size_t PhiLimit = PhiIter->GetPhiListSize();
										for (size_t PhiIndex = 0; PhiIndex < PhiLimit; ++PhiIndex) {
											int PhiUseSSANum = PhiIter->GetUseSSANum(PhiIndex);
											STARS_ea_t PhiUseDefAddr = this->GetGlobalDefAddr(UseOp, PhiUseSSANum);
											if (STARS_IsBlockNumPseudoID(PhiUseDefAddr)) { // found in Phi DEF; addr encodes block #
												// DEF of current Phi USE found in previous block Phi function; must be loop-variant
												//  if the DEF block is inside our loop, i.e. header block of current loop has a Phi USE
												//  whose DEF is some join point inside the loop, like an if-then-else join point.
												size_t PhiUseDefBlockNum = STARS_GetBlockNumFromPseudoID(PhiUseDefAddr);
												if (this->IsBlockInLoop(PhiUseDefBlockNum, LoopIndex)) {
													VariantUseFound = true;
													break;
												}
											}
											else if (STARS_BADADDR == PhiUseDefAddr) {
												// Could not find the DEF; be conservative.
												VariantUseFound = true;
												break;
											}
											else { // found instruction addr
												SMPBasicBlock *PhiUseDefBlock = this->GetBlockFromInstAddr(PhiUseDefAddr);
												size_t PhiUseDefBlockNum = PhiUseDefBlock->GetNumber();
												if (this->IsBlockInLoop(PhiUseDefBlockNum, LoopIndex)) {
													// Phi is normal case of joining incoming DEF to a DEF inside the loop.
													//  Current Phi Use is the one defined inside loop.
													VariantUseFound = true;
													break;
												}
											}
										} // end for all Phi USEs
									}
								}
							} // end if UseDefInPhiFunction
							else {
								bool UseDefInvariant = (this->LoopInvariantDEFs[LoopIndex].find(UseDefAddr) != this->LoopInvariantDEFs[LoopIndex].end());
								if (UseDefInvariant) {
									; // we already know the current USE is loop-invariant for the current loop
									  //  because we found the DEF in the LoopInvariantDEFs set
								}
								else if (LocalName) { // block-local DEFs in a loop are always loop-variant
									VariantUseFound = true;
									break;
								}
								else { // we have an instruction ID, not a block number, for UseDefAddr
									SMPBasicBlock *UseDefBlock = this->GetBlockFromInstAddr(UseDefAddr);
									size_t UseDefBlockNum = UseDefBlock->GetNumber();
									if (this->IsBlockInLoop(UseDefBlockNum, LoopIndex)) {
										VariantUseFound = true;
										break;
									}
								}
							}
						}
						else {
							SMP_msg("ERROR: Unknown operand type at %llx in DetectLoopInvariantDEFs\n", (unsigned long long) DefAddr);
							VariantUseFound = true;
							break;
						}
					} // end for all USEs

					if (!VariantUseFound) {
						// FIXED: removed unused InsertResult variable; the insert()
						//  return value was never examined.
						this->LoopInvariantDEFs[LoopIndex].insert(DefAddr);
						++LoopInvariantDEFCount;
#if STARS_DEBUG_LOOP_INVARIANTS
						SMP_msg("INFO: Invariant DEF at %llx LoopHeadAddr: %llx\n", (unsigned long long) DefAddr,
							(unsigned long long) this->GetBlockByNum(HeadBlockNum)->GetFirstAddr());
#endif
					}
				}
			} // end for all insts in block
		} // end for all blocks in loop
	} // end for all loops in function

	// Second pass: mark a DEF with the loop-invariant attribute only if it is
	//  invariant for ALL loops that contain its block.
	int CurrentBlockNum = SMP_BLOCKNUM_UNINIT;
	list<size_t> LoopList;
	for (size_t LoopNum = 0; LoopNum < this->LoopCount; ++LoopNum) {
		// Detect DEFs that are invariant for all loops containing the DEF.
		for (set<STARS_ea_t>::const_iterator DefAddrIter = this->LoopInvariantDEFs[LoopNum].cbegin();
			DefAddrIter != this->LoopInvariantDEFs[LoopNum].cend(); ++DefAddrIter) {
			STARS_ea_t CurrDefAddr = (*DefAddrIter);
			SMPBasicBlock *CurrBlock = this->GetBlockFromInstAddr(CurrDefAddr);
			assert(NULL != CurrBlock);
			if (CurrBlock->GetNumber() != CurrentBlockNum) { // cached LoopList is stale
				CurrentBlockNum = CurrBlock->GetNumber();
				LoopList.clear();
				this->BuildLoopList((int) CurrentBlockNum, LoopList);
			}
			bool InvariantForAllLoops = true;
			for (list<size_t>::const_iterator LoopListIter = LoopList.cbegin(); LoopListIter != LoopList.cend(); ++LoopListIter) {
				// FIXED: renamed from LoopNum, which shadowed the outer loop counter.
				size_t EnclosingLoopNum = (*LoopListIter);
				if (this->LoopInvariantDEFs[EnclosingLoopNum].find(CurrDefAddr) == this->LoopInvariantDEFs[EnclosingLoopNum].end()) {
					InvariantForAllLoops = false; // One loop containing CurrDefAddr does not have CurrDefAddr as invariant
					break;
				}
			} // end for all loops in LoopList for current block
			if (InvariantForAllLoops) {
				vector<SMPInstr *>::iterator InstIter = CurrBlock->GetInstIterFromAddr(CurrDefAddr);
				assert(InstIter != CurrBlock->GetLastInst());
				SMPInstr *CurrInst = (*InstIter);
				STARSDefUseIter DefIter = CurrInst->GetFirstNonFlagsDef();
				assert(DefIter != CurrInst->GetLastDef());
				DefIter = CurrInst->SetDefLoopInvariant(DefIter->GetOp());
			}
		} // end for all addresses in current loop
	} // end for all loops

	return;
} // end of SMPFunction::DetectLoopInvariantDEFs()

// Track UseOp back to its DEF and report whether that DEF is loop-invariant for
//  the loop with index LoopIndex.
// For a register (or stack) operand, the operand's own reaching DEF is checked.
// For a memory operand, every non-stack-pointer address register (base and index)
//  must reach a loop-invariant DEF; the memory contents themselves are not checked.
bool SMPFunction::IsUseLoopInvariantDEF(std::size_t LoopIndex, const STARSOpndTypePtr &UseOp, SMPInstr *UseInst) {
	bool UseDefInvariant = false;

	// Bounds-check LoopIndex before subscripting; the old guard
	//  (!this->LoopInvariantDEFs.empty()) only proved that SOME loop had
	//  invariant DEFs, not that LoopIndex was a valid subscript.
	if ((LoopIndex < this->LoopInvariantDEFs.size()) && (!this->LoopInvariantDEFs[LoopIndex].empty())) {
		if (!UseOp->IsMemOp()) {
			// Non-memory operand: canonicalize, find its USE in UseInst, map the
			//  USE back to its DEF address, and look that address up in the
			//  loop-invariant DEF set for this loop.
			STARSOpndTypePtr SearchOp = CloneIfNecessary(UseOp, this->UsesFramePointer());
			CanonicalizeOpnd(SearchOp);
			bool LocalName = UseInst->GetBlock()->IsLocalName(SearchOp);
			STARSDefUseIter UseIter = UseInst->FindUse(SearchOp);
			if (UseIter != UseInst->GetLastUse()) {
				int UseSSANum = UseIter->GetSSANum();
				STARS_ea_t UseDefAddr = UseInst->GetBlock()->GetDefAddrFromUseAddr(SearchOp, UseInst->GetAddr(), UseSSANum, LocalName);
				UseDefInvariant = (this->LoopInvariantDEFs[LoopIndex].find(UseDefAddr) != this->LoopInvariantDEFs[LoopIndex].end());
			}
		}
		else {
			// Memory operand: check all address regs in the memory operand.
			//  Stack/frame pointer regs are skipped (treated as implicitly invariant).
			bool UseFP = this->UsesFramePointer();
			int BaseReg, IndexReg;
			uint16_t ScaleFactor;
			STARS_ea_t offset;
			UseOp->MDExtractAddressFields(BaseReg, IndexReg, ScaleFactor, offset);
			if ((BaseReg != STARS_x86_R_none) && (!MDIsStackPtrReg(BaseReg, UseFP))) {
				STARSOpndTypePtr BaseOp = UseInst->MakeRegOpnd((STARS_regnum_t)BaseReg);
				STARSDefUseIter UseIter = UseInst->FindUse(BaseOp);
				if (UseIter != UseInst->GetLastUse()) {
					bool LocalName = UseInst->GetBlock()->IsLocalName(BaseOp);
					int UseSSANum = UseIter->GetSSANum();
					STARS_ea_t UseDefAddr = UseInst->GetBlock()->GetDefAddrFromUseAddr(BaseOp, UseInst->GetAddr(), UseSSANum, LocalName);
					UseDefInvariant = (this->LoopInvariantDEFs[LoopIndex].find(UseDefAddr) != this->LoopInvariantDEFs[LoopIndex].end());
				}
				if (!UseDefInvariant) {
					return false; // base reg variant (or USE not found) => whole operand variant
				}
			}
			if ((IndexReg != STARS_x86_R_none) && (!MDIsStackPtrReg(IndexReg, UseFP))) {
				STARSOpndTypePtr IndexOp = UseInst->MakeRegOpnd((STARS_regnum_t)IndexReg);
				STARSDefUseIter UseIter = UseInst->FindUse(IndexOp);
				if (UseIter != UseInst->GetLastUse()) {
					bool LocalName = UseInst->GetBlock()->IsLocalName(IndexOp);
					int UseSSANum = UseIter->GetSSANum();
					STARS_ea_t UseDefAddr = UseInst->GetBlock()->GetDefAddrFromUseAddr(IndexOp, UseInst->GetAddr(), UseSSANum, LocalName);
					UseDefInvariant = (this->LoopInvariantDEFs[LoopIndex].find(UseDefAddr) != this->LoopInvariantDEFs[LoopIndex].end());
				}
				// NOTE(review): if the index reg USE is not found, UseDefInvariant
				//  keeps whatever value the base-reg check produced — confirm intent.
			}
		}
	}
	return UseDefInvariant;
} // end of SMPFunction::IsUseLoopInvariantDEF()

// Find the loop number (index into LoopHeadBlockNumbers) corresponding to
//  the given loop header block number. Linear search; asserts on failure.
size_t SMPFunction::FindLoopNumFromHeadBlockNum(int LoopHeaderBlockNum) const {
	for (size_t LoopIndex = 0; LoopIndex < this->LoopHeadBlockNumbers.size(); ++LoopIndex) {
		if (this->LoopHeadBlockNumbers[LoopIndex] == LoopHeaderBlockNum) {
			return LoopIndex;
		}
	}
	assert(false); // should always find the loop if given a proper head block number
	// Unreachable sentinel return: without it, an NDEBUG build (assert compiled
	//  out) would fall off the end of a value-returning function, which is UB.
	return this->LoopHeadBlockNumbers.size();
}

// Detect basic and dependent loop induction variable families for all loops.
//  A basic induction var (BIV) is found via the Phi functions in each loop
//  header block; dependent induction vars (DIVs) are linear functions of
//  other induction vars and are attached to the BIV's family afterwards.
void SMPFunction::DetectLoopInductionVars(void) {
	bool UseFP = this->UsesFramePointer();
	bool VerboseOutput = global_stars_interface->VerboseLoopsMode();

	// Initialize induction var vector with entries that have BIV*0 and SMP_SSA_UNINIT as markers
	this->LoopInductionVars.resize(this->LoopCount);
	struct InductionVarTriple DummyBIV;
	SMPInstr *FirstInst = this->GetInstFromAddr(this->GetFirstFuncAddr());
	STARSOpndTypePtr ZeroOp = FirstInst->MakeImmediateOpnd(0);
	DefOrUse MultUse(ZeroOp, UNKNOWN);
	DummyBIV.Multiplier = MultUse;
	struct InductionVarFamily DummyFamily;
	DummyFamily.BIVIncomingSSANum = SMP_SSA_UNINIT;
	DummyFamily.BasicInductionVar = DummyBIV;
	DummyFamily.BIVInitExpr = nullptr;
	DummyFamily.BIVLimitExpr = nullptr;
	for (size_t LoopIndex = 0; LoopIndex < this->LoopCount; ++LoopIndex) {
		this->LoopInductionVars[LoopIndex].push_back(DummyFamily);
	}

	// Go through the Phi functions in the header blocks for all loops to search for basic induction var candidates.
	for (size_t LoopIndex = 0; LoopIndex < this->LoopHeadBlockNumbers.size(); ++LoopIndex) {
		// A basic induction variable meets the following criteria:
		//  1. Defined by a Phi function in the loop header block.
		//  2. Phi function has two Phi USEs, one from a DEF outside the loop, one from a DEF inside the loop.
		//  3. The DEF inside the loop is a simple +/-/*/negate function of the Phi DEF and loop invariant values.
		// Note that some texts say "constants" instead of "loop invariant values" in #3, but that is too restrictive.
		// We will limit ourselves to basic induction var registers for the present time.
		// UPDATE: We allow more than 2 Phi USEs, as long as each inside-the-loop Phi USE is the
		//  same induction var arithmetic.
		int HeaderBlockNum = this->LoopHeadBlockNumbers[LoopIndex];
		bool FoundBIV = false;
		SMPBasicBlock *HeaderBlock = this->GetBlockByNum((size_t) HeaderBlockNum);
		// Look at all register Phi functions in the HeaderBlock.
		for (PhiSetIter PhiIter = HeaderBlock->GetFirstPhi(); PhiIter != HeaderBlock->GetLastPhi(); ++PhiIter) {
			struct InductionVarTriple BIVTriple;
			struct InductionVarFamily CurrentFamily;
			STARSOpndTypePtr PhiDefOp = PhiIter->GetAnyOp();
			size_t PhiUseListSize = PhiIter->GetPhiListSize();
			CurrentFamily.BIVInitExpr = nullptr;
			CurrentFamily.BIVLimitExpr = nullptr;
			if (!MDIsDataFlowOpnd(PhiDefOp, UseFP))
				continue;
			// Classify each Phi USE's reaching DEF as inside or outside the loop (criteria 1 and 2).
			bool InsideDEFFound = false;
			bool OutsideDEFFound = false;
			bool InsideDEFIsInst = false;
			size_t InsideBIVFoundCount = 0;
			STARS_ea_t InsideDefAddr = STARS_BADADDR;
			STARS_ea_t OutsideDefAddr = STARS_BADADDR;
			int OutsideSSANum = -1;
			STARS_ea_t UseDefAddr = STARS_BADADDR;
			int PhiUseDefBlockNum1 = SMP_BLOCKNUM_UNINIT;
			for (size_t PhiUseIndex = 0; PhiUseIndex < PhiUseListSize; ++PhiUseIndex) {
				bool CurrentDEFIsInst = false;
				DefOrUse PhiUse = PhiIter->GetPhiRef(PhiUseIndex);
				int PhiUseSSANum = PhiIter->GetUseSSANum(PhiUseIndex);
				if (0 > PhiUseSSANum) // bad path or uninitialized Phi USE
					break;
				UseDefAddr = this->GetGlobalDefAddr(PhiDefOp, PhiUseSSANum);
				assert(STARS_BADADDR != UseDefAddr);
				if (STARS_IsSSAMarkerPseudoID(UseDefAddr))
					PhiUseDefBlockNum1 = 0;
				else if (STARS_IsBlockNumPseudoID(UseDefAddr))
					PhiUseDefBlockNum1 = STARS_GetBlockNumFromPseudoID(UseDefAddr);
				else {
					CurrentDEFIsInst = true;
					PhiUseDefBlockNum1 = this->GetBlockNumFromInstAddr(UseDefAddr);
				}
				if (this->IsBlockInLoop(PhiUseDefBlockNum1, LoopIndex)) {
					InsideDEFFound = true;
					InsideDefAddr = UseDefAddr;
					InsideDEFIsInst = CurrentDEFIsInst;
					if (!InsideDEFIsInst)
						break; // Cannot analyze internal Phi DEFs right now.
					CurrentFamily.BIVInsideLoopDefAddrs.push_back(InsideDefAddr);
				}
				else {
					OutsideDEFFound = true;
					OutsideSSANum = PhiUseSSANum;
					OutsideDefAddr = UseDefAddr;
				}
			} // end for all Phi USEs
			if (!(InsideDEFFound && OutsideDEFFound))
				continue;
			if (!InsideDEFIsInst)
				continue; // Need linear function statement DEF in loop, not another Phi function DEF.
			if ((1 + CurrentFamily.BIVInsideLoopDefAddrs.size()) != PhiUseListSize)
				continue; // need one outside DEF, rest inside DEFs

			// We have satisfied criteria 1 and 2 if we reach this point. Now for criterion 3.
			//  For each InsideDefAddr, is there an inst that USEs the Phi DEF and DEFs the
			//  inside Phi USE using simple induction var arithmetic?
			for (size_t InsideIndex = 0; InsideIndex < CurrentFamily.BIVInsideLoopDefAddrs.size(); ++InsideIndex) {
				InsideDefAddr = CurrentFamily.BIVInsideLoopDefAddrs[InsideIndex];
				SMPInstr *InsideDefInst = this->GetInstFromAddr(InsideDefAddr);
				STARSDefUseIter UseIter = InsideDefInst->FindUse(PhiDefOp);
				if ((UseIter != InsideDefInst->GetLastUse()) && (UseIter->GetSSANum() == PhiIter->GetDefSSANum())) {
					STARSOpndTypePtr SecondOp = nullptr;
					SMPoperator BIVOperator = SMP_NULL_OPERATOR;
					STARSDefUseIter SecondOpDefIter;
					if (InsideDefInst->IsBasicInductionVarArithmetic(SecondOp, BIVOperator)) {
						// Last question: Is the "y" operand in x := x op y loop-invariant?
						bool BIVarFound = ((nullptr != SecondOp) && SecondOp->IsImmedOp());
						int SecondOpSSANum = SMP_SSA_UNINIT;
						if (!BIVarFound && MDIsDataFlowOpnd(SecondOp, UseFP) && this->IsGlobalName(SecondOp)) {
							STARSOpndTypePtr SearchOp = CloneIfNecessary(SecondOp, UseFP);
							CanonicalizeOpnd(SearchOp);
							STARSDefUseIter SecondOpUseIter = InsideDefInst->FindUse(SearchOp);
							assert(SecondOpUseIter != InsideDefInst->GetLastUse());
							SecondOpSSANum = SecondOpUseIter->GetSSANum();

							// Last chance to detect BIV: Is SecondOp a USE of a loop-invariant DEF?
							//  Note that block-local names are loop-variant and are already excluded.
							SMPBasicBlock *InsideDefBlock = this->GetBlockByNum((size_t)PhiUseDefBlockNum1);
							assert(NULL != InsideDefBlock);
							STARS_ea_t SecondOpDefAddr = InsideDefBlock->GetDefAddrFromUseAddr(SearchOp, InsideDefAddr, SecondOpUseIter->GetSSANum(), false);
							assert(STARS_BADADDR != SecondOpDefAddr);
							int SecondOpDefBlockNum = SMP_BLOCKNUM_UNINIT;
							bool SecondOpDEFIsInst = false;
							if (STARS_IsSSAMarkerPseudoID(SecondOpDefAddr)) { // SSA Marker inst
								SecondOpDefBlockNum = 0;
								assert(SecondOpSSANum == 0);
							}
							else if (STARS_IsBlockNumPseudoID(SecondOpDefAddr)) {
								SecondOpDefBlockNum = STARS_GetBlockNumFromPseudoID(SecondOpDefAddr);
							}
							else {
								SecondOpDEFIsInst = true;
								SecondOpDefBlockNum = this->GetBlockNumFromInstAddr(SecondOpDefAddr);
							}
							if (this->IsBlockInLoop(SecondOpDefBlockNum, LoopIndex)) {
								if (SecondOpDEFIsInst) {
									// See if SecondOpDEF inst is on the loop-invariant DEFs list.
									if (this->LoopInvariantDEFs[LoopIndex].find(SecondOpDefAddr) != this->LoopInvariantDEFs[LoopIndex].end()) {
										BIVarFound = true;
									}
								}
							}
							else { // DEF of SecondOp is outside the loop entirely
								BIVarFound = true;
							}
						}
						if (BIVarFound) {
							// Build the (Multiplier, InductionVar, Addend) triple for this candidate.
							DefOrUse PhiDef(PhiDefOp, PhiIter->GetDefType(), PhiIter->GetDefSSANum());
							BIVTriple.InductionVar = PhiDef;
							BIVTriple.SubtractAddend = (SMP_SUBTRACT == BIVOperator);
							if ((SMP_ADD == BIVOperator) || (SMP_SUBTRACT == BIVOperator)) {
								// Adding an operand to itself is a strength reduction of multiplying by two.
								if (IsEqOpIgnoreBitwidth(PhiDefOp, SecondOp)) {
									if (SMP_ADD == BIVOperator) { // SMP_SUBTRACT would be zeroing out, not a progression.
										// Make triple: 2*InductionVar + 0
										STARSOpndTypePtr MultiplierOp = InsideDefInst->MakeImmediateOpnd(2);
										DefOrUse MultUse(MultiplierOp, NUMERIC);
										BIVTriple.Multiplier = MultUse;
										STARSOpndTypePtr AddendOp = InsideDefInst->MakeImmediateOpnd(0);
										DefOrUse AddendUse(AddendOp, NUMERIC);
										BIVTriple.Addend = AddendUse;
									}
								}
								else {
									// Make triple: 1*InductionVar + SecondOp
									DefOrUse AddendUse(SecondOp, NUMERIC, SecondOpSSANum);
									BIVTriple.Addend = AddendUse;
									STARSOpndTypePtr MultiplierOp = InsideDefInst->MakeImmediateOpnd(1);
									DefOrUse MultUse(MultiplierOp, NUMERIC);
									BIVTriple.Multiplier = MultUse;
								}
							}
							else if ((SMP_S_LEFT_SHIFT == BIVOperator) || (SMP_U_LEFT_SHIFT == BIVOperator)) {
								// Must be a shift left by constant number of bits, same as multiply.
								assert(SecondOp->IsImmedOp());
								// Make triple: (1 << SecondOp)*InductionVar + 0
								STARS_uval_t ShiftCount = SecondOp->GetImmedValue();
								// Shift in STARS_uval_t width: (1 << ShiftCount) shifted a 32-bit int,
								//  which is undefined for counts >= 31 and truncates 64-bit multipliers.
								STARS_uval_t MultiplierValue = (((STARS_uval_t) 1) << ShiftCount);
								STARSOpndTypePtr AddendOp = InsideDefInst->MakeImmediateOpnd(0);
								DefOrUse AddendUse(AddendOp, NUMERIC);
								BIVTriple.Addend = AddendUse;
								STARSOpndTypePtr MultiplierOp = InsideDefInst->MakeImmediateOpnd(MultiplierValue);
								DefOrUse MultUse(MultiplierOp, NUMERIC, SecondOpSSANum);
								BIVTriple.Multiplier = MultUse;
							}
							else {
								assert((SMP_U_MULTIPLY == BIVOperator) || (SMP_S_MULTIPLY == BIVOperator));
								// Make triple: SecondOp*InductionVar + 0
								STARSOpndTypePtr AddendOp = InsideDefInst->MakeImmediateOpnd(0);
								DefOrUse AddendUse(AddendOp, NUMERIC);
								BIVTriple.Addend = AddendUse;
								DefOrUse MultUse(SecondOp, NUMERIC, SecondOpSSANum);
								BIVTriple.Multiplier = MultUse;
							}
							if (0 < InsideBIVFoundCount) { // not first BIV candidate
								// Must match previous candidates.
								if (EqualInductionVars(BIVTriple, CurrentFamily.BasicInductionVar)) {
									// All is well; no need to update.
									FoundBIV = true;
									++InsideBIVFoundCount;
								}
								else {
									FoundBIV = false;
									// Cast size_t LoopIndex for the %d conversion; passing size_t
									//  through varargs for %d is undefined on LP64 targets.
									SMP_msg("ERROR: Non-matching BIV candidates in loop %d in func %s\n",
										(int) LoopIndex, this->GetFuncName());
									break;
								}
							}
							else { // First BIV candidate
								CurrentFamily.BIVIncomingSSANum = OutsideSSANum;
								CurrentFamily.BIVIncomingDefAddr = OutsideDefAddr;
								CurrentFamily.BasicInductionVar = BIVTriple;
								FoundBIV = true;
								++InsideBIVFoundCount;
							}
							if (FoundBIV && VerboseOutput) {
								SMP_msg("INFO: Basic Induction Var for func at %llx for Loop %d :", (unsigned long long) this->GetFirstFuncAddr(), (int) LoopIndex);
								DumpInductionVar(BIVTriple);
							}
						}
					} // end if BasicInductionVarArithmetic found
				} // end if we found USE of Phi DEF
			} // end for all values of InsideIndex
			if (InsideBIVFoundCount == CurrentFamily.BIVInsideLoopDefAddrs.size()) {
				this->LoopInductionVars[LoopIndex].push_back(CurrentFamily);
				SMP_msg("INFO: BIVFoundCount success for func at %llx for Loop %d \n", (unsigned long long) this->GetFirstFuncAddr(), (int) LoopIndex);
			}
		} // end for all Phi functions in current loop header block
		if (!FoundBIV) {
			SMP_msg("ERROR: LOOP: BIV not found for loop %d at %llx in func %s\n", (int) LoopIndex,
				(unsigned long long) HeaderBlock->GetFirstAddr(), this->GetFuncName());
			++STARS_LoopInductionVarIDFailures;
		}
		else {
			++STARS_LoopInductionVarIDSuccesses;
		}
	} // end for all loops

#if 1
	// Look for dependent induction variables, which are linear functions of other induction variables.
	list<size_t> BlockList;
	for (size_t LoopIndex = 0; LoopIndex < this->LoopHeadBlockNumbers.size(); ++LoopIndex) {
		this->BuildLoopBlockList(LoopIndex, BlockList);
		for (list<size_t>::const_iterator BlockIter = BlockList.cbegin(); BlockIter != BlockList.cend(); ++BlockIter) {
			SMPBasicBlock *CurrBlock = this->GetBlockByNum(*BlockIter);
			for (vector<SMPInstr *>::iterator InstIter = CurrBlock->GetFirstInst(); InstIter != CurrBlock->GetLastInst(); ++InstIter) {
				SMPInstr *CurrInst = (*InstIter);
				STARSOpndTypePtr Mult1 = nullptr, Mult2 = nullptr, Add1 = nullptr, Add2 = nullptr;
				SMPoperator RhsOperator = SMP_NULL_OPERATOR;

				if (CurrInst->IsDependentInductionVarArithmetic(Mult1, Mult2, Add1, Add2, RhsOperator)) {
					// See if one of Mult1, Mult2, Add1 or Add2 is a basic induction variable.
					struct DependentInductionVar DIV;
					this->FindDependentInductionVar(LoopIndex, DIV, Add1, Add2, Mult1, Mult2, RhsOperator, CurrInst);
					if (VerboseOutput && (STARS_BADADDR != DIV.DIVDefAddr)) {
						SMP_msg("INFO: Dependent Induction Variable for loop # %d found at %llx\n", (int) LoopIndex, (unsigned long long) CurrInst->GetAddr());
						PrintOperand(DIV.DIV.GetOp());
						SMP_msg("\n");
						DumpInductionVar(DIV.IVExpr);
					}
				}
			}
		}

		BlockList.clear();
	}
#endif

	return;
} // end of SMPFunction::DetectLoopInductionVars()

// Is CurrOp a BIV or DIV for loop with LoopIndex? return iterator and position in family if true,
//  where we signal BIV with FamilyIndex == 0 and DIV with FamilyIndex == 1+vectorindex.
//  Do not modify ListIter or FamilyIndex if we are returning false.
bool SMPFunction::IsLoopInductionVar(std::size_t LoopIndex, STARSOpndTypePtr &CurrOp, SMPInstr *UseInst, STARSInductionVarFamilyIter &ListIter, std::size_t &FamilyIndex) {
	STARSInductionVarFamilyList::iterator TempIter;
	int CurrOpSSANum;
	STARSDefUseIter UseIter = UseInst->FindUse(CurrOp);
	if (UseIter == UseInst->GetLastUse())
		return false;
	else if (this->LoopInductionVars.size() <= LoopIndex)
		return false; // haven't done induction var analysis for this loop yet
	else
		CurrOpSSANum = UseIter->GetSSANum();

	for (TempIter = this->LoopInductionVars[LoopIndex].begin(); TempIter != this->LoopInductionVars[LoopIndex].end(); ++TempIter) {
		if (0 > TempIter->BIVIncomingSSANum)  // uninitialized entry
			continue;
		size_t TempIndex = 0;
		if ((CurrOpSSANum == TempIter->BasicInductionVar.InductionVar.GetSSANum()) && IsEqOpIgnoreBitwidth(CurrOp, TempIter->BasicInductionVar.InductionVar.GetOp())) {
			// CurrOp matches the basic induction var.
			ListIter = TempIter;
			FamilyIndex = TempIndex;
			return true;
		}
		else {
			for (TempIndex = 1; TempIndex <= TempIter->DependentInductionVars.size(); ++TempIndex) {
				int DIVSSANum = TempIter->DependentInductionVars[TempIndex - 1].DIV.GetSSANum();
				if ((CurrOpSSANum == DIVSSANum) && IsEqOpIgnoreBitwidth(CurrOp, TempIter->DependentInductionVars[TempIndex - 1].DIV.GetOp())) {
					ListIter = TempIter;
					FamilyIndex = TempIndex;
					return true;
				}
			}
		}
	}
	return false;
} // end of SMPFunction::IsLoopInductionVar()

// Is CurrOp a BIV or DIV for loop with LoopIndex? return iterator and position in family if true,
//  where we signal BIV with FamilyIndex == 0 and DIV with FamilyIndex == 1+vectorindex.
//  Do not modify ListIter or FamilyIndex if we are returning false.
// Same as IsLoopInductionVar() without requiring any SSA nums to match, i.e. is CurrOp an IV.
bool SMPFunction::IsLoopInductionVarForAnySSANum(std::size_t LoopIndex, const STARSOpndTypePtr &CurrOp, STARSInductionVarFamilyList::iterator &ListIter, std::size_t &FamilyIndex) {
	STARSInductionVarFamilyList::iterator TempIter;

	// Bounds check added for consistency with IsLoopInductionVar(); without it,
	//  a LoopIndex beyond the analyzed loops would index out of range.
	if (this->LoopInductionVars.size() <= LoopIndex)
		return false; // haven't done induction var analysis for this loop yet

	for (TempIter = this->LoopInductionVars[LoopIndex].begin(); TempIter != this->LoopInductionVars[LoopIndex].end(); ++TempIter) {
		if (0 > TempIter->BIVIncomingSSANum)  // uninitialized entry
			continue;
		size_t TempIndex = 0;
		if (IsEqOpIgnoreBitwidth(CurrOp, TempIter->BasicInductionVar.InductionVar.GetOp())) {
			// CurrOp matches the basic induction var.
			ListIter = TempIter;
			FamilyIndex = TempIndex;
			return true;
		}
		else {
			// 1-based TempIndex signals a dependent induction var.
			for (TempIndex = 1; TempIndex <= TempIter->DependentInductionVars.size(); ++TempIndex) {
				if (IsEqOpIgnoreBitwidth(CurrOp, TempIter->DependentInductionVars[TempIndex - 1].DIV.GetOp())) {
					ListIter = TempIter;
					FamilyIndex = TempIndex;
					return true;
				}
			}
		}
	}
	return false;
} // end of SMPFunction::IsLoopInductionVarForAnySSANum()

// For CurrOp in the loop nest containing UseInst, return true (with family iterator,
//  position in family, and the matching loop number) if CurrOp is an induction var
//  for any loop in the nest, searching from the innermost loop outward.
bool SMPFunction::IsLoopNestInductionVar(const STARSOpndTypePtr &CurrOp, SMPInstr *UseInst, STARSInductionVarFamilyIter &ListIter, std::size_t &FamilyIndex, int &LoopIndex) {
	int BlockNum = UseInst->GetBlock()->GetNumber();
	int InnerLoop = this->GetInnermostLoopNum(BlockNum);
	int OuterLoop = this->GetOutermostLoopNum(BlockNum);
	if ((InnerLoop < 0) || (OuterLoop < 0))
		return false; // block is not inside any loop

	// Walk from the innermost loop number toward the outermost, whichever
	//  direction that is numerically; both endpoints are inclusive.
	int Step = (InnerLoop > OuterLoop) ? -1 : 1;
	for (int CurrLoop = InnerLoop; ; CurrLoop += Step) {
		if (this->IsLoopInductionVarForAnySSANum((size_t) CurrLoop, CurrOp, ListIter, FamilyIndex)) {
			LoopIndex = CurrLoop;
			return true;
		}
		if (CurrLoop == OuterLoop)
			break; // searched every loop in the nest without a match
	}
	return false;
} // end of SMPFunction::IsLoopNestInductionVar()

// Pass in the results of DefInst::IsDependentInductionVarArithmetic().
// Fill in DIV if DefInst creates a dependent induction var (a linear function
//  a * IV + b of exactly one induction var, with loop-invariant a and b),
//  else set DIV.DIVDefAddr to STARS_BADADDR to signal failure.
// NOTE(review): RhsOperator is received but never read in this function body — confirm
//  whether it is reserved for future use.
void SMPFunction::FindDependentInductionVar(std::size_t LoopIndex, struct DependentInductionVar &DIV, STARSOpndTypePtr Add1, STARSOpndTypePtr Add2, STARSOpndTypePtr Mult1, STARSOpndTypePtr Mult2, SMPoperator RhsOperator, SMPInstr *DefInst) {
	// Convert operands to constant operands if possible.
	// NonConst* flags start true ("not known constant") and are cleared as each
	//  operand is proven to be an immediate or an SCCP constant.
	bool NonConstAdd1 = true;
	bool NonConstAdd2 = true;
	bool NonConstMult1 = true;
	bool NonConstMult2 = true;
	DIV.DIVInitExpr = nullptr;
	DIV.DIVLimitExpr = nullptr;
	STARSOpndTypePtr IVOp = nullptr;      // the one operand identified as an induction var
	size_t IVCounter = 0;                 // how many operands matched an induction var
	STARSInductionVarFamilyIter IVFamilyIter; // family that IVOp belongs to
	size_t FamilyIndex = 0;
	size_t OperandCount = 0;              // non-null operands among Add1/Add2/Mult1/Mult2
	size_t LoopInvariantOperandCount = 0; // operands proven constant or loop-invariant
	bool UseFP = this->UsesFramePointer();
	STARS_ea_t DefAddr = DefInst->GetAddr();

	// We cannot handle operands that are not registers, immediates, or stack operands.
	if ((nullptr != Add1) && (!Add1->IsImmedOp()) && (!MDIsDataFlowOpnd(Add1, UseFP))) {
		DIV.DIVDefAddr = STARS_BADADDR; // signal failure to find DIV
		return;
	}
	else if ((nullptr != Add2) && (!Add2->IsImmedOp()) && (!MDIsDataFlowOpnd(Add2, UseFP))) {
		DIV.DIVDefAddr = STARS_BADADDR; // signal failure to find DIV
		return;
	}
	else if ((nullptr != Mult1) && (!Mult1->IsImmedOp()) && (!MDIsDataFlowOpnd(Mult1, UseFP))) {
		DIV.DIVDefAddr = STARS_BADADDR; // signal failure to find DIV
		return;
	}
	else if ((nullptr != Mult2) && (!Mult2->IsImmedOp()) && (!MDIsDataFlowOpnd(Mult2, UseFP))) {
		DIV.DIVDefAddr = STARS_BADADDR; // signal failure to find DIV
		return;
	}

	// Screen out induction variables already found by seeing if the DefInst address matches
	//  the InsideDefAddr of an existing induction variable. We don't want to say that a BIV
	//  is also a DIV that is a function of itself.
	for (size_t index = 0; index < this->LoopInductionVars.size(); ++index) {
		if (!this->LoopInductionVars[index].empty()) {
			STARSInductionVarFamilyList::const_iterator IVIter;
			for (IVIter = this->LoopInductionVars[index].cbegin(); IVIter != this->LoopInductionVars[index].cend(); ++IVIter) {
				if (IVIter->BIVIncomingSSANum >= 0) { // skip dummy/uninitialized entries
					vector<STARS_ea_t>::const_iterator AddrIter = std::find(IVIter->BIVInsideLoopDefAddrs.cbegin(), IVIter->BIVInsideLoopDefAddrs.cend(), DefAddr);
					bool FoundIVDefAddr = (AddrIter != IVIter->BIVInsideLoopDefAddrs.cend());
					if (FoundIVDefAddr) {
						DIV.DIVDefAddr = STARS_BADADDR; // signal failure to find DIV
						return;
					}
				}
			}
		}
	}

	// Classify each non-null operand as (a) constant (immediate or SCCP-replaceable),
	//  (b) an induction var for this loop, or (c) a USE of a loop-invariant DEF.
	if (nullptr != Add1) {
		++OperandCount;
		if (Add1->IsImmedOp() || this->ReplaceLoopRegWithConst(LoopIndex, Add1, DefInst)) {
			NonConstAdd1 = false;
			++LoopInvariantOperandCount;
		}
		else {
			// Determine if Add1 is an induction variable or a loop-invariant DEF.
			if (this->IsLoopInductionVar(LoopIndex, Add1, DefInst, IVFamilyIter, FamilyIndex)) {
				IVOp = Add1;
				++IVCounter;
			}
			else if (this->IsUseLoopInvariantDEF(LoopIndex, Add1, DefInst)) {
				++LoopInvariantOperandCount;
			}
		}
	}
	if (nullptr != Add2) {
		++OperandCount;
		if (Add2->IsImmedOp() || this->ReplaceLoopRegWithConst(LoopIndex, Add2, DefInst)) {
			NonConstAdd2 = false;
			++LoopInvariantOperandCount;
		}
		else {
			// Determine if Add2 is an induction variable or a loop-invariant DEF.
			if (this->IsLoopInductionVar(LoopIndex, Add2, DefInst, IVFamilyIter, FamilyIndex)) {
				IVOp = Add2;
				++IVCounter;
			}
			else if (this->IsUseLoopInvariantDEF(LoopIndex, Add2, DefInst)) {
				++LoopInvariantOperandCount;
			}
		}
	}
	if (nullptr != Mult1) {
		++OperandCount;
		if (Mult1->IsImmedOp() || this->ReplaceLoopRegWithConst(LoopIndex, Mult1, DefInst)) {
			NonConstMult1 = false;
			++LoopInvariantOperandCount;
		}
		else {
			// Determine if Mult1 is an induction variable or a loop-invariant DEF.
			if (this->IsLoopInductionVar(LoopIndex, Mult1, DefInst, IVFamilyIter, FamilyIndex)) {
				IVOp = Mult1;
				++IVCounter;
			}
			else if (this->IsUseLoopInvariantDEF(LoopIndex, Mult1, DefInst)) {
				++LoopInvariantOperandCount;
			}
		}
	}
	if (nullptr != Mult2) {
		++OperandCount;
		if (Mult2->IsImmedOp() || this->ReplaceLoopRegWithConst(LoopIndex, Mult2, DefInst)) {
			NonConstMult2 = false;
			++LoopInvariantOperandCount;
		}
		else {
			// Determine if Mult2 is an induction variable or a loop-invariant DEF.
			if (this->IsLoopInductionVar(LoopIndex, Mult2, DefInst, IVFamilyIter, FamilyIndex)) {
				IVOp = Mult2;
				++IVCounter;
			}
			else if (this->IsUseLoopInvariantDEF(LoopIndex, Mult2, DefInst)) {
				++LoopInvariantOperandCount;
			}
		}
	}

	if (1 == IVCounter) { // must find exactly one induction var among the operands.
		// Constant folding: collapse a pair of known-constant addends (or multipliers)
		//  into a single immediate so the expression has the shape a * IV + b.
		if (!(NonConstAdd1 || NonConstAdd2)) { // Two constants; fold them by adding
			STARS_uval_t SumValue = Add1->GetImmedValue() + Add2->GetImmedValue();
			Add1 = DefInst->MakeImmediateOpnd(SumValue);
			Add2 = nullptr;
			--OperandCount;
			--LoopInvariantOperandCount;
			NonConstAdd2 = true;
		}
		if (!(NonConstMult1 || NonConstMult2)) { // Two constants; fold them by multiplying
			STARS_uval_t ProductValue = Mult1->GetImmedValue() * Mult2->GetImmedValue();
			Mult1 = DefInst->MakeImmediateOpnd(ProductValue);
			Mult2 = nullptr;
			--OperandCount;
			--LoopInvariantOperandCount;
			NonConstMult2 = true;
		}
		// After folding, every operand must be either the IV or loop-invariant,
		//  and there can be at most three operands (multiplier, IV, addend).
		if ((OperandCount > 3) || (OperandCount != (IVCounter + LoopInvariantOperandCount))) {
			// Not the right operands for DIV linear function a * x + b.
			DIV.DIVDefAddr = STARS_BADADDR; // signal failure to find DIV
		}
		else { // construct DIV and add it to the right family.
			if ((IVOp != Mult1) && (IVOp != Mult2)) {
				// Problem. We need a * IV + b and we have something like a * b + IV.
				DIV.DIVDefAddr = STARS_BADADDR; // signal failure to find DIV
			}
			else {
				// Fill in DIV.IVExpr.InductionVar from the IV operand's USE entry, and
				//  DIV.IVExpr.Multiplier from the other multiplier operand.
				if (IVOp == Mult1) {
					STARSOpndTypePtr SearchOp = CloneIfNecessary(Mult1, UseFP);
					CanonicalizeOpnd(SearchOp);
					STARSDefUseIter IVUseIter = DefInst->FindUse(SearchOp);
					assert(IVUseIter != DefInst->GetLastUse());
					DIV.IVExpr.InductionVar = (*IVUseIter);
					// Make Mult2 into a DefOrUse
					if (MDIsDataFlowOpnd(Mult2, UseFP)) {
						STARSOpndTypePtr SearchOp2 = CloneIfNecessary(Mult2, UseFP);
						CanonicalizeOpnd(SearchOp2);
						STARSDefUseIter Mult2UseIter = DefInst->FindUse(SearchOp2);
						assert(Mult2UseIter != DefInst->GetLastUse());
						DIV.IVExpr.Multiplier = (*Mult2UseIter);
					}
					else {
						assert(Mult2->IsImmedOp());
						DefOrUse Mult2Use(Mult2);
						DIV.IVExpr.Multiplier = Mult2Use;
					}
				}
				else {
					assert(IVOp == Mult2);
					STARSOpndTypePtr SearchOp = CloneIfNecessary(Mult2, UseFP);
					CanonicalizeOpnd(SearchOp);
					STARSDefUseIter IVUseIter = DefInst->FindUse(SearchOp);
					assert(IVUseIter != DefInst->GetLastUse());
					DIV.IVExpr.InductionVar = (*IVUseIter);
					// Make Mult1 into a DefOrUse
					if (MDIsDataFlowOpnd(Mult1, UseFP)) {
						STARSOpndTypePtr SearchOp1 = CloneIfNecessary(Mult1, UseFP);
						CanonicalizeOpnd(SearchOp1);
						STARSDefUseIter Mult1UseIter = DefInst->FindUse(SearchOp1);
						assert(Mult1UseIter != DefInst->GetLastUse());
						DIV.IVExpr.Multiplier = (*Mult1UseIter);
					}
					else {
						assert(Mult1->IsImmedOp());
						DefOrUse Mult1Use(Mult1);
						DIV.IVExpr.Multiplier = Mult1Use;
					}
				}
				// Normalize the addend into Add1; Add2 must be empty by now
				//  (either folded above or never present).
				if ((nullptr == Add1) && (nullptr != Add2)) {
					Add1 = Add2;
					Add2 = nullptr;
				}
				assert(nullptr == Add2); // should have been folded into Add1
				// Make Add1 into a DefOrUse
				if (nullptr == Add1) {
					// Make a zero addend
					STARSOpndTypePtr ZeroAddend = DefInst->MakeImmediateOpnd(0);
					DefOrUse ZeroUse(ZeroAddend);
					DIV.IVExpr.Addend = ZeroUse;
					DIV.IVExpr.SubtractAddend = false;
				}
				else if (MDIsDataFlowOpnd(Add1, UseFP)) {
					STARSOpndTypePtr SearchOp1 = CloneIfNecessary(Add1, UseFP);
					CanonicalizeOpnd(SearchOp1);
					STARSDefUseIter Add1UseIter = DefInst->FindUse(SearchOp1);
					assert(Add1UseIter != DefInst->GetLastUse());
					DIV.IVExpr.Addend = (*Add1UseIter);
					DIV.IVExpr.SubtractAddend = false;
				}
				else {
					assert(Add1->IsImmedOp());
					STARS_sval_t SignedImmedValue = (STARS_sval_t) Add1->GetImmedValue();
					if (SignedImmedValue >= 0) {
						DefOrUse Add1Use(Add1);
						DIV.IVExpr.Addend = Add1Use;
						DIV.IVExpr.SubtractAddend = false;
					}
					else { // Make negative addend a subtrahend
						SignedImmedValue = (-SignedImmedValue);
						STARSOpndTypePtr SubtrahendOp = DefInst->MakeImmediateOpnd((STARS_uval_t) SignedImmedValue);
						DefOrUse SubtrahendUse(SubtrahendOp);
						DIV.IVExpr.Addend = SubtrahendUse;
						DIV.IVExpr.SubtractAddend = true;
					}
				}
				// The DIV itself is the (non-flags) DEF of DefInst.
				STARSDefUseIter DIVDefIter = DefInst->GetFirstNonFlagsDef();
				if (DIVDefIter != DefInst->GetLastDef()) {
					DIV.DIVDefAddr = DefAddr;
					DIV.DIV = (*DIVDefIter);
					// DIV is complete, so store it in the IV family.
					IVFamilyIter->DependentInductionVars.push_back(DIV);
				}
				else {
					// Must have inst that DEFs only the flags, so we cannot determine DIV.
					DIV.DIVDefAddr = STARS_BADADDR; // signal failure to find DIV
				}
			}
		}
	}
	else {
		DIV.DIVDefAddr = STARS_BADADDR; // signal failure to find DIV
	}

	return;
} // end of SMPFunction::FindDependentInductionVar()

// If RegOp USE in UseInst is an SCCP constant, create an immediate operand for it.
bool SMPFunction::ReplaceLoopRegWithConst(std::size_t LoopIndex, STARSOpndTypePtr &RegOp, SMPInstr *UseInst) {
	bool Replaced = false;
	if (RegOp->IsRegOp()) {
		STARSOpndTypePtr SearchOp = CloneIfSubwordReg(RegOp);
		CanonicalizeOpnd(SearchOp);
		STARSDefUseIter UseIter = UseInst->FindUse(SearchOp);
		assert(UseIter != UseInst->GetLastUse());
		int UseSSANum = UseIter->GetSSANum();
		SMPBasicBlock *CurrBlock = UseInst->GetBlock();
		int UseHashIndex = HashGlobalNameAndSSA(SearchOp, UseSSANum);
		bool LocalName = CurrBlock->IsLocalName(SearchOp);
		STARSSCCPMapIter ConstIter;
		bool ConstFound = this->FindSCCPConstIter(CurrBlock, UseHashIndex, LocalName, ConstIter);
		if (ConstFound) {
			STARS_uval_t ImmedValue = ConstIter->second.ConstValue;
			RegOp = UseInst->MakeImmediateOpnd(ImmedValue);
			Replaced = true;
		}
	}

	return Replaced;
} // end of SMPFunction::ReplaceRegWithConst()

// Create loop limit expr: BIVOpnd relationaloperator Opnd2
//  Builds a STARSExpression for the loop-terminating comparison, normalized so the
//  basic induction variable (BIV) is always the left operand (operands are swapped
//  and the relational operator inverted if the BIV was on the right).
//  Returns nullptr if no comparison operator/address was recorded, or if the BIV
//  cannot be located on either side even after expanding through register copies.
//  (Cleanup: removed unused locals LhsBIV/RhsBIV.)
STARSExpression* SMPFunction::CreateLimitExpr(const std::size_t &LoopIndex, const struct InductionVarFamily &IVFamily, const struct LoopComparison &LoopCompExpr) {
	STARSOpndTypePtr BIVOp = IVFamily.BasicInductionVar.InductionVar.GetOp();
	SMPoperator RelationalOperator = LoopCompExpr.CompareOperator;
	bool BIVIsLeftOp = IsEqOpIgnoreBitwidth(LoopCompExpr.Operand1.GetOp(), BIVOp);
	bool BIVIsRightOp = false;
	if (!BIVIsLeftOp)
		BIVIsRightOp = IsEqOpIgnoreBitwidth(LoopCompExpr.Operand2.GetOp(), BIVOp);
	if ((SMP_NULL_OPERATOR == RelationalOperator) || (STARS_BADADDR == LoopCompExpr.CompareAddr)) {
		return nullptr; // no usable loop comparison was recorded earlier
	}

	SMPInstr *CompareInst = this->GetInstFromAddr(LoopCompExpr.CompareAddr);
	assert(nullptr != CompareInst);
	STARSExpression *LimitExpr = new STARSExpression();
	LimitExpr->SetParentFunc(this);
	LimitExpr->SetParentInst(CompareInst);
	LimitExpr->SetOriginalParentInst(CompareInst);

	// Assume that BIV will be on the left side, swap sides later if not.
	LimitExpr->SetOperator(RelationalOperator);
	LimitExpr->SetLeftOperand(LoopCompExpr.Operand1.GetOp());
	LimitExpr->SetLeftUseAddr(LoopCompExpr.CompareAddr);
	LimitExpr->SetLeftSSANum(LoopCompExpr.Operand1.GetSSANum());
	LimitExpr->SetRightOperand(LoopCompExpr.Operand2.GetOp());
	LimitExpr->SetRightUseAddr(LoopCompExpr.CompareAddr);
	LimitExpr->SetRightSSANum(LoopCompExpr.Operand2.GetSSANum());

	// Deal with the possibility that the comparison instruction involves a copy of the BIV. If so, expanding
	//  the comparison expression will not produce two sub-trees; it will just replace operands on the path to the BIV.
	if (!(BIVIsLeftOp || BIVIsRightOp)) {
		// See if expanding the loop comparison expr, while stopping on encountering the BIV, will succeed.
		bool StoppedOnIV = false;
		bool changed = false;
		set<int> DummyLoopRegHashes;
		set<STARS_ea_t> StackPtrCopySet;
		int DepthCounter = 0;
		if (LimitExpr->ExpandExpr(LimitExpr->GetParentInst()->GetAddr(), LoopIndex, false, true, false, false, false, DummyLoopRegHashes, StoppedOnIV, changed, StackPtrCopySet, DepthCounter)) {
			// After expansion the BIV is found only if a side collapsed to a lone operand equal to BIVOp.
			BIVIsLeftOp = (nullptr != LimitExpr->GetConstLeftOperand()) && (!LimitExpr->HasLeftSubTree()) && IsEqOpIgnoreBitwidth(LimitExpr->GetConstLeftOperand(), BIVOp);
			if (!BIVIsLeftOp) {
				BIVIsRightOp = (nullptr != LimitExpr->GetConstRightOperand()) && (!LimitExpr->HasRightSubTree()) && IsEqOpIgnoreBitwidth(LimitExpr->GetConstRightOperand(), BIVOp);
			}
		}
		else {
			SMP_msg("ERROR: ExpandExpr() failure in CreateLimitExpr() attempt to find BIV for loop %zu in func at %llx\n",
				LoopIndex, (uint64_t) this->GetFirstFuncAddr());
		}
	}

	if (!(BIVIsLeftOp || BIVIsRightOp)) {
		delete LimitExpr; // BIV not found; discard the partially built expr
		return nullptr;
	}

	if (BIVIsRightOp) {
		// Get the Phi DEF addr for the BIV.
		STARS_ea_t UseDefAddr = this->GetGlobalDefAddr(LimitExpr->GetConstRightOperand(), LimitExpr->GetRightSSANum());
		LimitExpr->SetRightPreLoopDefAddr(UseDefAddr);

		// Swap LoopCompExpr operands and invert the relational operator
		//  to normalize LimitExpr to always have BIVOp on the left.
		LimitExpr->SetOperator(InvertRelationalOperator(RelationalOperator));
		LimitExpr->SwapSides();
	}
	else {
		assert(BIVIsLeftOp);
		// Get the Phi DEF addr for the BIV.
		STARS_ea_t UseDefAddr = this->GetGlobalDefAddr(LimitExpr->GetConstLeftOperand(), LimitExpr->GetLeftSSANum());
		LimitExpr->SetLeftPreLoopDefAddr(UseDefAddr);
	}

	return LimitExpr;
} // end of SMPFunction::CreateLimitExpr()

// Create loop limit expr: BIVOpnd_InitExpr + (stride * IterationCountExpr)
//  Returns nullptr when no iteration-count expr exists for the loop, the first
//  inside-loop DEF instruction cannot be found, or the BIV is not of the simple
//  form (multiplier == 1, constant addend).
STARSExpression *SMPFunction::CreateSecondaryBIVLimitExpr(const std::size_t &LoopIndex, const struct InductionVarFamily &IVFamily) {
	// Without an iteration-count expr for this loop, no limit expr can be built.
	if (nullptr == this->LoopIterationsCountExprs[LoopIndex])
		return nullptr;

	SMPInstr *InsideDefInst = this->GetInstFromAddr(IVFamily.BIVInsideLoopDefAddrs[0]);
	if (nullptr == InsideDefInst) {
		return nullptr;
	}

	// Limit to multiplier of 1 initially. Generalize later.
	STARSOpndTypePtr MultOp = IVFamily.BasicInductionVar.Multiplier.GetOp();
	if ((!MultOp->IsImmedOp()) || (MultOp->GetImmedValue() != 1)) {
		SMP_msg("ERROR: BIV does not have multiplier of value 1.\n");
		return nullptr;
	}
	STARSOpndTypePtr IncrementOp = IVFamily.BasicInductionVar.Addend.GetOp()->clone();
	if (!IncrementOp->IsImmedOp()) {
		SMP_msg("ERROR: BIV does not have constant addend.\n");
		return nullptr;
	}

	// A subtracted addend (stride) is represented as a negative immediate value.
	if (IVFamily.BasicInductionVar.SubtractAddend) {
		STARS_sval_t NegatedStride = (0 - ((STARS_sval_t) IncrementOp->GetImmedValue()));
		IncrementOp = InsideDefInst->MakeImmediateOpnd((STARS_uval_t) NegatedStride);
	}

	// Right-hand product subtree: stride * IterationCountExpr.
	STARSExpression *ProductExpr = new STARSExpression();
	ProductExpr->SetParentFunc(this);
	ProductExpr->SetParentInst(InsideDefInst);
	ProductExpr->SetOriginalParentInst(InsideDefInst);
	ProductExpr->SetOperator(SMP_U_MULTIPLY);
	ProductExpr->SetLeftOperand(IncrementOp);
	ProductExpr->SetLeftUseAddr(InsideDefInst->GetAddr());
	assert(nullptr != this->LoopIterationsCountExprs[LoopIndex]);
	ProductExpr->SetRightTree(this->LoopIterationsCountExprs[LoopIndex]->Clone());

	// Full limit expr: BIVInitExpr + (stride * IterationCountExpr).
	STARSExpression *SumExpr = new STARSExpression();
	SumExpr->SetParentFunc(this);
	SumExpr->SetParentInst(InsideDefInst);
	SumExpr->SetOriginalParentInst(InsideDefInst);
	SumExpr->SetOperator(SMP_ADD);
	SumExpr->SetLeftTree(IVFamily.BIVInitExpr->Clone());
	SumExpr->SetRightTree(ProductExpr);

	return SumExpr;
} // end of SMPFunction::CreateSecondaryBIVLimitExpr()

// Create DIV init expr as linear function of its BIV init expr
//  Builds DIV = (Multiplier * IV) +/- Addend; when Multiplier == 1 this is
//  just IV +/- Addend. The expression is anchored at the DIV's defining
//  instruction. (Improvement: bind DIV by const reference instead of copying
//  the whole struct — it is only read here.)
STARSExpression *SMPFunction::CreateDIVInitExpr(const std::size_t &LoopIndex, const struct InductionVarFamily &IVFamily, const std::size_t DIVIndex) {
	const struct DependentInductionVar &DIV = IVFamily.DependentInductionVars[DIVIndex];
	STARSOpndTypePtr MulOp = DIV.IVExpr.Multiplier.GetOp();
	bool ConstMul = MulOp->IsImmedOp();
	STARSExpression *DIVExpr = new STARSExpression();
	bool UnityMul = ConstMul && (1 == MulOp->GetImmedValue());
	DIVExpr->SetParentFunc(this);
	// DIVDefAddr must be a real instruction address, not a pseudo-ID (e.g. a Phi DEF).
	assert(DIV.DIVDefAddr < STARS_PSEUDO_ID_MIN);
	SMPInstr *DIVDefInst = this->GetInstFromAddr(DIV.DIVDefAddr);
	assert(nullptr != DIVDefInst);
	DIVExpr->SetOriginalParentInst(DIVDefInst);
	DIVExpr->SetParentInst(DIVDefInst);
	// Find the SSA number of the induction variable USE at the DIV's DEF instruction.
	STARSDefUseIter IVUseIter = DIVDefInst->FindUse(DIV.IVExpr.InductionVar.GetOp());
	assert(IVUseIter != DIVDefInst->GetLastUse());
	int IVSSANum = IVUseIter->GetSSANum();
	if (DIV.IVExpr.SubtractAddend) {
		DIVExpr->SetOperator(SMP_SUBTRACT);
	}
	else {
		DIVExpr->SetOperator(SMP_ADD);
	}
	if (UnityMul) { // Typical, simple case.
		// DIV = IV +/- Addend.
		DIVExpr->SetLeftOperand(DIV.IVExpr.InductionVar.GetOp());
		DIVExpr->SetLeftUseAddr(DIV.DIVDefAddr);
		DIVExpr->SetLeftSSANum(IVSSANum);
		DIVExpr->SetRightOperand(DIV.IVExpr.Addend.GetOp());
		DIVExpr->SetRightUseAddr(DIV.DIVDefAddr);
	}
	else { // MulOp is not 1.
		// DIV = (MulOp * IV) +/- Addend.
		DIVExpr->SetRightOperand(DIV.IVExpr.Addend.GetOp());
		DIVExpr->SetRightUseAddr(DIV.DIVDefAddr);
		STARSExpression *DIVExprLeftTree = new STARSExpression();
		DIVExprLeftTree->SetParentFunc(this);
		DIVExprLeftTree->SetOriginalParentInst(DIVDefInst);
		DIVExprLeftTree->SetParentInst(DIVDefInst);
		DIVExprLeftTree->SetOperator(SMP_U_MULTIPLY);
		DIVExprLeftTree->SetLeftOperand(DIV.IVExpr.InductionVar.GetOp());
		DIVExprLeftTree->SetLeftUseAddr(DIV.DIVDefAddr);
		DIVExprLeftTree->SetLeftSSANum(IVSSANum);
		DIVExprLeftTree->SetRightOperand(MulOp);
		DIVExprLeftTree->SetRightUseAddr(DIV.DIVDefAddr);
		DIVExpr->SetLeftTree(DIVExprLeftTree);
	}

	return DIVExpr;
} // end of SMPFunction::CreateDIVInitExpr()

// Create DIV limit expr as linear function of its BIV limit expr
//  Builds DIV = (Multiplier * BIVLimitExpr) +/- Addend; when Multiplier == 1 this
//  is just BIVLimitExpr +/- Addend. Returns nullptr if the family has no BIV limit expr.
//  BUG FIX: the non-unity-multiplier branch never set the left subtree's operator,
//  leaving it as the null operator; it now sets SMP_U_MULTIPLY, matching the
//  "(MulOp * IV)" intent and the parallel code in CreateDIVInitExpr().
//  (Also: bind DIV by const reference instead of copying the struct — read-only use.)
STARSExpression *SMPFunction::CreateDIVLimitExpr(const std::size_t &LoopIndex, const struct InductionVarFamily &IVFamily, const std::size_t DIVIndex) {
	if (nullptr == IVFamily.BIVLimitExpr)
		return nullptr;

	const struct DependentInductionVar &DIV = IVFamily.DependentInductionVars[DIVIndex];
	STARSOpndTypePtr MulOp = DIV.IVExpr.Multiplier.GetOp();
	bool ConstMul = MulOp->IsImmedOp();
	STARSExpression *DIVExpr = new STARSExpression();
	bool UnityMul = ConstMul && (1 == MulOp->GetImmedValue());
	DIVExpr->SetParentFunc(this);
	// DIVDefAddr must be a real instruction address, not a pseudo-ID (e.g. a Phi DEF).
	assert(DIV.DIVDefAddr < STARS_PSEUDO_ID_MIN);
	SMPInstr *DIVDefInst = this->GetInstFromAddr(DIV.DIVDefAddr);
	assert(nullptr != DIVDefInst);
	DIVExpr->SetOriginalParentInst(DIVDefInst);
	DIVExpr->SetParentInst(DIVDefInst);
	// Find the SSA number of the induction variable USE at the DIV's DEF instruction.
	STARSDefUseIter IVUseIter = DIVDefInst->FindUse(DIV.IVExpr.InductionVar.GetOp());
	assert(IVUseIter != DIVDefInst->GetLastUse());
	int IVSSANum = IVUseIter->GetSSANum();
	if (DIV.IVExpr.SubtractAddend) {
		DIVExpr->SetOperator(SMP_SUBTRACT);
	}
	else {
		DIVExpr->SetOperator(SMP_ADD);
	}
	if (UnityMul) { // Typical, simple case.
		// DIV = IV +/- Addend.
		DIVExpr->SetLeftTree(IVFamily.BIVLimitExpr->Clone());
		DIVExpr->SetRightOperand(DIV.IVExpr.Addend.GetOp());
		DIVExpr->SetRightUseAddr(DIV.DIVDefAddr);
	}
	else { // MulOp is not 1.
		// DIV = (MulOp * IV) +/- Addend.
		DIVExpr->SetRightOperand(DIV.IVExpr.Addend.GetOp());
		DIVExpr->SetRightUseAddr(DIV.DIVDefAddr);
		STARSExpression *DIVExprLeftTree = new STARSExpression();
		DIVExprLeftTree->SetParentFunc(this);
		DIVExprLeftTree->SetOriginalParentInst(DIVDefInst);
		DIVExprLeftTree->SetParentInst(DIVDefInst);
		DIVExprLeftTree->SetOperator(SMP_U_MULTIPLY); // was missing; see header comment
		DIVExprLeftTree->SetLeftTree(IVFamily.BIVLimitExpr->Clone());
		DIVExprLeftTree->SetRightOperand(MulOp);
		DIVExprLeftTree->SetRightUseAddr(DIV.DIVDefAddr);
		DIVExpr->SetLeftTree(DIVExprLeftTree);
	}

	return DIVExpr;
} // end of SMPFunction::CreateDIVLimitExpr()

// Create loop iteration count expr
//  Builds a symbolic trip-count expression from the BIV's InitExpr and the loop's
//  LimitExpr: (Limit [+ IncDecValue] - Init) / IncDecValue for upward-counting
//  BIVs, and (Init [+ IncDecValue] - Limit) / IncDecValue for downward-counting
//  BIVs (the division is omitted when IncDecValue == 1). Returns nullptr when the
//  BIV is not simple (multiplier 1, constant addend) or when the loop-exit
//  relational operator makes the trip count undefined (see tables below).
//  Side effects: records LoopExecutesWithLimitValue[LoopIndex] and
//  LoopIncrementValue[LoopIndex].
//  NOTE(review): InitExpr/LimitExpr subtrees may be linked into the returned
//  expression, so their ownership is shared with the caller's structures.
//  FIXES: %u -> %zu for the size_t LoopIndex in both SMP_msg() calls (matches
//  the %zu usage elsewhere in this file); null-check GetConstLeftOperand()
//  before IsImmedOp() (matches the guard pattern in CreateLimitExpr());
//  removed dead "#if 0" branch and unused local "Simplified".
STARSExpression* SMPFunction::CreateIterationsExpr(std::size_t LoopIndex, const struct InductionVarFamily &IVFamily, bool PositiveIncrement, STARSExpression *InitExpr, STARSExpression *LimitExpr) {
	// For now, we will limit ourselves to BIVars with a loop update of BIV *= 1 + constant.
	bool SimpleBIV = (IVFamily.BasicInductionVar.Multiplier.GetOp()->IsImmedOp()
		&& (IVFamily.BasicInductionVar.Multiplier.GetOp()->GetImmedValue() == 1)
		&& IVFamily.BasicInductionVar.Addend.GetOp()->IsImmedOp());
	if (!SimpleBIV) {
		SMP_msg("ERROR: CreateIterationsExpr() did not find SimpleBIV for loop %zu\n", LoopIndex);
		return nullptr;
	}

	STARS_uval_t IncDecValue = IVFamily.BasicInductionVar.Addend.GetOp()->GetImmedValue();
	// bool PositiveIncrement = (!IVFamily.BasicInductionVar.SubtractAddend);
	bool BottomTestingLoop = (this->LoopTypesByLoopNum[LoopIndex] == STARS_BOTTOM_TESTING_LOOP);

	// For loops with a BIV that counts upward by 1:
	//
	// TopTesting   BranchExitsLoop    RelationalOperator     IterationCount
	// ----------   ---------------    ------------------     --------------
	//     Y               Y                   >              (Limit + 1 - Init) / 1
	//     Y               Y                   >=             (Limit - Init) / 1
	//     Y               Y                   <              undefined
	//     Y               Y                   <=             undefined
	//     Y               Y                   ==             (Limit - Init) / 1
	//     Y               Y                   !=             undefined
	//     Y               N                   >              undefined
	//     Y               N                   >=             undefined
	//     Y               N                   <              (Limit - Init) / 1
	//     Y               N                   <=             (Limit + 1 - Init) / 1
	//     Y               N                   ==             undefined
	//     Y               N                   !=             (Limit - Init) / 1
	//     N               Y                   >              (Limit + 1 - Init) / 1
	//     N               Y                   >=             (Limit - Init) / 1
	//     N               Y                   <              undefined
	//     N               Y                   <=             undefined
	//     N               Y                   ==             (Limit - Init) / 1
	//     N               Y                   !=             undefined
	//     N               N                   >              undefined
	//     N               N                   >=             undefined
	//     N               N                   <              (Limit - Init) / 1
	//     N               N                   <=             (Limit + 1 - Init) / 1
	//     N               N                   ==             undefined
	//     N               N                   !=             (Limit - Init) / 1
	//
	// We see that certain COND_BRANCH relational operators add 1 to the iteration count.
	// Certain combinations make no sense, e.g. counting up by 1 from Init to Limit while exiting
	// the loop when BIV < Limit. Some of these cases could be zero-trip loops that we will analyze
	// in greater depth later.
	// NOTE: There is no difference between top-testing and bottom-testing loops in the table.
	//  That is because the only difference in practice is the possibility that a top-testing loop
	//  executes zero iterations, while the bottom-testing version of the loop would execute one
	//  iteration in that case. If we have security properties that depend on identifying zero-trip
	//  loops, we will need a separate analysis later.
	// NOTE: In these tables, we assume that == and != are used properly. There is the possibility
	//  that someone could write code such as: for (i = 0; i != limit; i += 8) and have a limit
	//  that is not a multiple of 8. Again, we will need a special-case analysis if we want to detect
	//  the possibility of such an infinite loop.

	// For loops with a BIV that counts downward by 1:
	//
	// TopTesting   BranchExitsLoop    RelationalOperator     IterationCount
	// ----------   ---------------    ------------------     --------------
	//     Y               Y                   >              undefined
	//     Y               Y                   >=             undefined
	//     Y               Y                   <              (Init + 1 - Limit) / 1
	//     Y               Y                   <=             (Init - Limit) / 1
	//     Y               Y                   ==             (Init - Limit) / 1
	//     Y               Y                   !=             undefined
	//     Y               N                   >              (Init - Limit) / 1
	//     Y               N                   >=             (Init + 1 - Limit) / 1
	//     Y               N                   <              undefined
	//     Y               N                   <=             undefined
	//     Y               N                   ==             undefined
	//     Y               N                   !=             (Init - Limit) / 1
	//     N               Y                   >              undefined
	//     N               Y                   >=             undefined
	//     N               Y                   <              (Init + 1 - Limit) / 1
	//     N               Y                   <=             (Init - Limit) / 1
	//     N               Y                   ==             (Init - Limit) / 1
	//     N               Y                   !=             undefined
	//     N               N                   >              (Init - Limit) / 1
	//     N               N                   >=             (Init + 1 - Limit) / 1
	//     N               N                   <              undefined
	//     N               N                   <=             undefined
	//     N               N                   ==             undefined
	//     N               N                   !=             (Init - Limit) / 1

	// We can generalize the formulae above to allow increment or decrement by values other than 1.
	//  Whenever we add 1 in the numerator in the table, just add the increment or decrement value.
	//  Whenever we divide by 1 in the denominator, divide by the increment or decrement value.

	// Weed out undefined cases.
	bool Undefined = false;
	assert(LoopIndex < this->LoopComparisonExprs.size());
	SMPoperator BranchOperator = this->LoopComparisonExprs[LoopIndex].CompareOperator;
	if (PositiveIncrement) {
		if (this->LoopComparisonExprs[LoopIndex].ExitsLoop) {
			Undefined = ((BranchOperator != SMP_GREATER_THAN) && (BranchOperator != SMP_GREATER_EQUAL)
				&& (BranchOperator != SMP_ABOVE) && (BranchOperator != SMP_ABOVE_EQUAL) && (BranchOperator != SMP_EQUAL));
		}
		else {
			Undefined = ((BranchOperator != SMP_LESS_THAN) && (BranchOperator != SMP_LESS_EQUAL)
				&& (BranchOperator != SMP_BELOW) && (BranchOperator != SMP_BELOW_EQUAL) && (BranchOperator != SMP_NOT_EQUAL));
		}
	}
	else { // Decrement by one on each iteration
		if (this->LoopComparisonExprs[LoopIndex].ExitsLoop) {
			Undefined = ((BranchOperator != SMP_LESS_THAN) && (BranchOperator != SMP_LESS_EQUAL)
				&& (BranchOperator != SMP_BELOW) && (BranchOperator != SMP_BELOW_EQUAL) && (BranchOperator != SMP_EQUAL));
		}
		else {
			Undefined = ((BranchOperator != SMP_GREATER_THAN) && (BranchOperator != SMP_GREATER_EQUAL)
				&& (BranchOperator != SMP_ABOVE) && (BranchOperator != SMP_ABOVE_EQUAL) && (BranchOperator != SMP_NOT_EQUAL));
		}
	}
	if (Undefined) {
		SMP_msg("ERROR: CreateIterationsExpr() found Undefined terminating relational operator for loop %zu operator %s PosIncr: %d BottomTesting: %d\n", 
			LoopIndex, OperatorText[BranchOperator], PositiveIncrement, BottomTestingLoop);
		return nullptr;
	}

	STARSOpndTypePtr IncDecOp = InitExpr->GetParentInst()->MakeImmediateOpnd(IncDecValue);
	STARS_sval_t LimitIncrease = 0;
	if (PositiveIncrement) {
		if ((BranchOperator == SMP_LESS_EQUAL) || (BranchOperator == SMP_GREATER_THAN)
			|| (BranchOperator == SMP_BELOW_EQUAL) || (BranchOperator == SMP_ABOVE)) {
			LimitIncrease += IncDecValue; // only because we weeded out some undefined cases; change if we redefine those cases.
		}
	}
	else { // Decrement
		if ((BranchOperator == SMP_LESS_THAN) || (BranchOperator == SMP_GREATER_EQUAL)
			|| (BranchOperator == SMP_BELOW) || (BranchOperator == SMP_ABOVE_EQUAL)) {
			LimitIncrease += IncDecValue;
		}
	}

	bool NeedLimitIncrease = (0 != LimitIncrease);
	this->LoopExecutesWithLimitValue[LoopIndex] = NeedLimitIncrease;
	this->LoopIncrementValue[LoopIndex] = (STARS_sval_t) IncDecValue;

	// At this point, the iteration count is (Limit + LimitIncrease - Init) / IncDecValue for the SimpleBIV cases
	//  with PositiveIncrement, and (Init + LimitIncrease - Limit) / IncDecValue for the Decrement case.
	//  We will omit the division by 1 case, of course, although SimplifyExpr() should remove it.
	// If Init is a constant, then we can fold (LimitIncrease - Init) or (Init + LimitIncrease) into a constant operand.
	STARSExpression *IterationsExpr = new STARSExpression();
	IterationsExpr->SetParentInst(LimitExpr->GetParentInst());
	IterationsExpr->SetOriginalParentInst(LimitExpr->GetOriginalParentInst());
	IterationsExpr->SetParentFunc(this);
	bool LimitIncreaseFolded = false;
	if (PositiveIncrement) {
		if ((InitExpr->GetOperator() == SMP_ASSIGN) && (nullptr != InitExpr->GetConstLeftOperand()) && InitExpr->GetConstLeftOperand()->IsImmedOp()) {
			// Only LeftOperand is valid, and it is a constant.
			STARS_sval_t InitValue = (STARS_sval_t) InitExpr->GetConstLeftOperand()->GetImmedValue();
			LimitIncrease -= InitValue;
			STARSOpndTypePtr RightOp = InitExpr->GetParentInst()->MakeImmediateOpnd((STARS_uval_t) LimitIncrease);
			IterationsExpr->SetRightOperand(RightOp);
			LimitIncreaseFolded = true;
			NeedLimitIncrease = false; // already in IterationsExpr
		}
		else { // non-constant InitExpr
			if (InitExpr->GetOperator() == SMP_ASSIGN) { // only an operand
				IterationsExpr->SetRightOperand(InitExpr->GetLeftOperand());
				IterationsExpr->SetRightUseAddr(InitExpr->GetLeftUseAddr());
				IterationsExpr->SetRightSSANum(InitExpr->GetLeftSSANum());
				IterationsExpr->SetRightPreLoopDefAddr(InitExpr->GetLeftPreLoopDefAddr());
			}
			else {
				IterationsExpr->SetRightTree(InitExpr);
			}
		}
		if (NeedLimitIncrease) {
			IterationsExpr->SetOperator(SMP_SUBTRACT);  // (LimitExpr RHS + LimitIncrease) - InitExpr
			STARSOpndTypePtr RightOp = InitExpr->GetParentInst()->MakeImmediateOpnd((STARS_uval_t)LimitIncrease);
			STARSExpression *LeftExpr = new STARSExpression();
			LeftExpr->SetParentInst(LimitExpr->GetParentInst());
			LeftExpr->SetOriginalParentInst(LimitExpr->GetOriginalParentInst());
			LeftExpr->SetParentFunc(this);
			LeftExpr->SetRightOperand(RightOp);
			LeftExpr->SetOperator(SMP_ADD); // LimitExpr + LimitIncrease is the LeftExpr
			// If LimitExpr right side (limit) is just an operand, make it the LeftOperand for LeftExpr, else make it the LeftTree for LeftExpr.
			if (!LimitExpr->HasRightSubTree()) { // just RightOperand
				LeftExpr->SetLeftOperand(LimitExpr->GetRightOperand());
				LeftExpr->SetLeftUseAddr(LimitExpr->GetRightUseAddr());
				LeftExpr->SetLeftSSANum(LimitExpr->GetRightSSANum());
				LeftExpr->SetLeftPreLoopDefAddr(LimitExpr->GetRightPreLoopDefAddr());
			}
			else {
				LeftExpr->SetLeftTree(LimitExpr->GetRightTree());
			}
			IterationsExpr->SetLeftTree(LeftExpr);
		}
		else if (LimitIncreaseFolded) { // just need LimitExpr right hand side as left tree or operand.
			IterationsExpr->SetOperator(SMP_ADD);  // LimitExpr RHS + (folded LimitIncrease - InitExpr)
			// If LimitExpr RHS is just an operand, make it the LeftOperand, else make it the LeftTree.
			if (!LimitExpr->HasRightSubTree()) { // just RightOperand
				IterationsExpr->SetLeftOperand(LimitExpr->GetRightOperand());
				IterationsExpr->SetLeftUseAddr(LimitExpr->GetRightUseAddr());
				IterationsExpr->SetLeftSSANum(LimitExpr->GetRightSSANum());
				IterationsExpr->SetLeftPreLoopDefAddr(LimitExpr->GetRightPreLoopDefAddr());
			}
			else {
				IterationsExpr->SetLeftTree(LimitExpr->GetRightTree());
			}
		}
		else { // never folded, no LimitIncrease, just LimitExprRHS - InitExpr
			IterationsExpr->SetOperator(SMP_SUBTRACT);
			if (!LimitExpr->HasRightSubTree()) { // just RightOperand
				IterationsExpr->SetLeftOperand(LimitExpr->GetRightOperand());
				IterationsExpr->SetLeftUseAddr(LimitExpr->GetRightUseAddr());
				IterationsExpr->SetLeftSSANum(LimitExpr->GetRightSSANum());
				IterationsExpr->SetLeftPreLoopDefAddr(LimitExpr->GetRightPreLoopDefAddr());
			}
			else {
				IterationsExpr->SetLeftTree(LimitExpr->GetRightTree());
			}
		}
	} // end if PositiveIncrement
	else { // decrement case
		if ((InitExpr->GetOperator() == SMP_ASSIGN) && (nullptr != InitExpr->GetConstLeftOperand()) && InitExpr->GetConstLeftOperand()->IsImmedOp()) {
			// Only LeftOperand is valid, and it is a constant.
			STARS_sval_t InitValue = (STARS_sval_t) InitExpr->GetConstLeftOperand()->GetImmedValue();
			LimitIncrease += InitValue;
			STARSOpndTypePtr LeftOp = InitExpr->GetParentInst()->MakeImmediateOpnd((STARS_uval_t) LimitIncrease);
			IterationsExpr->SetLeftOperand(LeftOp);
			LimitIncreaseFolded = true;
			NeedLimitIncrease = false; // already in IterationsExpr
		}
		else { // non-constant InitExpr
			if (!NeedLimitIncrease) { // LHS of IterationsExpr is just InitExpr
				if (InitExpr->GetOperator() == SMP_ASSIGN) { // only an operand
					IterationsExpr->SetLeftOperand(InitExpr->GetLeftOperand());
					IterationsExpr->SetLeftUseAddr(InitExpr->GetLeftUseAddr());
					IterationsExpr->SetLeftSSANum(InitExpr->GetLeftSSANum());
					IterationsExpr->SetLeftPreLoopDefAddr(InitExpr->GetLeftPreLoopDefAddr());
				}
				else {
					IterationsExpr->SetLeftTree(InitExpr);
				}
			}
			else { // need LHS tree of (InitExpr + LimitIncrease)
				STARSExpression *LeftTree = new STARSExpression();
				LeftTree->SetParentInst(InitExpr->GetParentInst());
				LeftTree->SetOriginalParentInst(InitExpr->GetOriginalParentInst());
				LeftTree->SetParentFunc(this);
				if (InitExpr->GetOperator() == SMP_ASSIGN) { // only an operand
					LeftTree->SetLeftOperand(InitExpr->GetLeftOperand());
					LeftTree->SetLeftUseAddr(InitExpr->GetLeftUseAddr());
					LeftTree->SetLeftSSANum(InitExpr->GetLeftSSANum());
					LeftTree->SetLeftPreLoopDefAddr(InitExpr->GetLeftPreLoopDefAddr());
				}
				else {
					LeftTree->SetLeftTree(InitExpr);
				}
				LeftTree->SetOperator(SMP_ADD);
				STARSOpndTypePtr LeftRightOp = InitExpr->GetParentInst()->MakeImmediateOpnd((STARS_uval_t) LimitIncrease);
				LeftTree->SetRightOperand(LeftRightOp);
				IterationsExpr->SetLeftTree(LeftTree);
			}
		}
		// Now we just need LimitExpr right hand side as right tree or operand.
		IterationsExpr->SetOperator(SMP_SUBTRACT);  // (folded InitExpr + LimitIncrease) - LimitExpr RHS
		// If LimitExpr RHS is just an operand, make it the RightOperand, else make it the RightTree.
		if (!LimitExpr->HasRightSubTree()) { // just RightOperand
			IterationsExpr->SetRightOperand(LimitExpr->GetRightOperand());
			IterationsExpr->SetRightUseAddr(LimitExpr->GetRightUseAddr());
			IterationsExpr->SetRightSSANum(LimitExpr->GetRightSSANum());
			IterationsExpr->SetRightPreLoopDefAddr(LimitExpr->GetRightPreLoopDefAddr());
		}
		else {
			IterationsExpr->SetRightTree(LimitExpr->GetRightTree());
		}
	} // end .. else decrement case

	// We want to simplify the resulting expression before we divide by IncDecValue.
	//  The division otherwise might have to have complex simplifications. E.g. if we
	//  have InitExpr of RSP-200 and LimitExpr of RSP-100 and IncDecValue of 4, we prefer
	//  to simplify ((RSP - 100) - (RSP - 200)) to 100 and then return 100 / 4. Our 
	//  caller will call SimplifyExpr() on the expr we return.
	assert(nullptr != IterationsExpr);
	(void) IterationsExpr->SimplifyDriver(); // result flag not needed here
	if (IncDecValue != 1) {
		// Divide by IncDecOp.
		STARSExpression *NewIterExpr = new STARSExpression();
		NewIterExpr->SetParentInst(LimitExpr->GetParentInst());
		NewIterExpr->SetOriginalParentInst(LimitExpr->GetOriginalParentInst());
		NewIterExpr->SetParentFunc(this);
		NewIterExpr->SetOperator(SMP_U_DIVIDE);
		NewIterExpr->SetRightOperand(IncDecOp);
		NewIterExpr->SetRightUseAddr(this->LoopComparisonExprs[LoopIndex].CompareAddr);
		NewIterExpr->SetLeftTree(IterationsExpr);
		return NewIterExpr;
	}
	else {
		return IterationsExpr;
	}
} // end of SMPFunction::CreateIterationsExpr()

// Create a Memory range expression set for all static, indirect or indexed memory writes in the loop.
//  Return true on success, false on failure.
//  Outputs: MemWriteExprs receives the expanded/simplified write-address exprs;
//  MemWriteExprWidths records (expr iter, (byte width, index into StackPtrCopiesVector));
//  StackPtrCopiesVector collects per-expr sets of stack-pointer copy addresses seen
//  during expansion (index -1 in MemWriteExprWidths means the set was empty).
//  Side effects: sets LoopWritesMemory / LoopWritesGlobalStaticMemory / LoopAnalysisProblems
//  flags for LoopIndex, and may add global-static write exprs to InArgsUsedInMemWrites.
bool SMPFunction::CreateSPARKMemoryWriteRangeExpr(size_t LoopIndex, bool RecordLoopRegs, set<int> &LoopRegHashes, STARSExprSet &MemWriteExprs, STARSMemWriteExprsList &MemWriteExprWidths, vector<set<STARS_ea_t> > &StackPtrCopiesVector) {
	STARSExpression *MemoryRangeExpr = nullptr;

	// Iterate through loop instructions and find all indirect or indexed memory writes.
	list<size_t> LoopBlockList;
	this->BuildLoopBlockList(LoopIndex, LoopBlockList);
	bool success = false;
	bool OneFailure = false;
	int StackPtrCopiesVectorIndex = 0;
	// Triple: STARSExprSetIter, ByteWidth, index into vector of StackPtrCopiesVector
	uint16_t ByteWidth;

	for (size_t BlockNum : LoopBlockList) {
		SMPBasicBlock *CurrBlock = this->GetBlockByNum(BlockNum);
		if (CurrBlock->HasIndirectMemWrite() || CurrBlock->HasStaticMemWrite()) {
			for (vector<SMPInstr *>::iterator InstIter = CurrBlock->GetFirstInst(); InstIter != CurrBlock->GetLastInst(); ++InstIter) {
				SMPInstr *CurrInst = (*InstIter);
				if (CurrInst->MDHasAnyRepeatPrefix() && CurrInst->MDIsPossibleStringLoopingOpcode()) {
					// Looping string opcodes are dealt with separately.
					continue;
				}
				if (CurrInst->HasIndirectMemoryWrite() || CurrInst->HasStaticMemWrite()) {
					STARS_ea_t InstAddr = CurrInst->GetAddr();
					this->LoopWritesMemory[LoopIndex] = true;
					// Clone so we can un-normalize without disturbing the instruction's DEF operand.
					STARSOpndTypePtr MemDefOp = CurrInst->GetMemDef()->clone();
					CurrInst->MDGetUnnormalizedOp(MemDefOp);
					assert(MemDefOp->IsMemOp());
					// Start with the current memory addressing expression.
					MemoryRangeExpr = this->CreateMemoryAddressExpr(MemDefOp, CurrInst);
					if (nullptr == MemoryRangeExpr) {
						OneFailure = true; // failure
						SMP_msg("ERROR: CreateSPARKMemoryWriteRangeExpr() abandoned due to CreateMemoryAddressExpr() failure at %llx.\n", (uint64_t) InstAddr);
						continue;
					}
					// Expand and simplify the addressing expression.
					MemoryRangeExpr->EvaluateConsts();
					bool ParentChanged = MemoryRangeExpr->SimplifyDriver();
					ByteWidth = MemDefOp->GetByteWidth();
					bool HasGlobalStaticMemOp = MemDefOp->IsStaticMemOp();
					this->LoopWritesGlobalStaticMemory[LoopIndex] = (HasGlobalStaticMemOp || this->LoopWritesGlobalStaticMemory[LoopIndex]);

					if (MemoryRangeExpr->AreAllRegsLoopInvariant()) {
						// We need to place this memory write expr into the function-level non-loop expr container.
						//  This container is InArgsUsedInMemWrites, which should already have this expr, so there
						//  is no further work for us to do here. The exception is when the memory write expr is
						//  a global static mem write, which was not necessarily traced back to an InArg. We will
						//  store those in the InArgsUsedInMemWrites container.
						SMP_msg("INFO: Loop-invariant mem expr inside loop at %llx\n", InstAddr);
						if (!HasGlobalStaticMemOp) {
							continue;
						}
						else {
							pair<STARSExprSetIter, bool> InsertResult = this->InArgsUsedInMemWrites[LoopIndex + 1].insert(MemoryRangeExpr);
							if (InsertResult.second) {
								pair<STARSExprSetIter, size_t> InsertValue(InsertResult.first, ByteWidth);
								this->InArgsUsedInMemWriteByteWidths[LoopIndex + 1].push_back(InsertValue);
								// Fall through to expand and simplify code below
								// NOTE(review): MemoryRangeExpr is now referenced from InArgsUsedInMemWrites;
								//  the ExpandExpr() failure path below deletes it, which would leave a dangling
								//  pointer in that container — confirm intended ownership.
							}
						}
					}

#if 0
					// Make a clone in case we succeed in expansion and need to trace incoming values to the loop.
					STARSExpression *MemoryRangeExprClone = MemoryRangeExpr->Clone();
#endif
					bool StoppedOnIV = false;
					bool changed = false;
					set<STARS_ea_t> StackPtrCopySet;
					int DepthCounter = 0;
					// Expand the address expr back to loop-incoming values, recording loop regs and SP copies.
					success = MemoryRangeExpr->ExpandExpr(MemoryRangeExpr->GetParentInst()->GetAddr(), LoopIndex, false, true, true, RecordLoopRegs, false, LoopRegHashes, StoppedOnIV, changed, StackPtrCopySet, DepthCounter);
					if (!success) {
						delete MemoryRangeExpr;
						MemoryRangeExpr = nullptr;
						SMP_msg("ERROR: CreateSPARKMemoryWriteRangeExpr() error due to ExpandExpr() failure on loop %zu InstAddr %llx in func at %llx\n",
							LoopIndex, (uint64_t) InstAddr, (uint64_t) this->GetFirstFuncAddr());
						OneFailure = true; // failure
						continue; 
					}
					MemoryRangeExpr->EvaluateConsts();
					ParentChanged = MemoryRangeExpr->SimplifyDriver();
					// Handle multiple memory writes in one loop by keeping a set and combining write exprs later.
					pair<STARSExprSetIter, bool> InsertResult = MemWriteExprs.insert(MemoryRangeExpr);
					if (InsertResult.second) { // New expr in set
						int TempIndex = StackPtrCopiesVectorIndex;
						if (StackPtrCopySet.empty()) {
							TempIndex = -1; // don't need to push empty sets onto vector
						}
						else {
							StackPtrCopiesVector.push_back(StackPtrCopySet);
							++StackPtrCopiesVectorIndex;
						}
						pair<size_t, int> InsertPair2((size_t) ByteWidth, TempIndex);
						pair<STARSExprSetIter, pair<size_t, int> > InsertValue(InsertResult.first, InsertPair2);
						MemWriteExprWidths.push_back(InsertValue);
						// StackPtrCopySet.clear();
					}
					// NOTE(review): when InsertResult.second is false (duplicate expr), MemoryRangeExpr
					//  is neither stored nor deleted — looks like a leak; confirm against expr ownership rules.
				}
			} // end for all instructions in block
		} // end if indirect memory write in current block
	} // end for all blocks

	if (global_STARS_program->ShouldSTARSTranslateToSPARKAda()) {
		// Combine exprs like (RDI+2) and (RDI+3) into a single expr of greater width for proof simplicity.
		this->CombineMemoryExprs(MemWriteExprWidths, MemWriteExprs, StackPtrCopiesVector);
	}

	success = (0 < MemWriteExprs.size());
	if (OneFailure)
		this->LoopAnalysisProblems[LoopIndex] = true;

	return success;
} // end of SMPFunction::CreateSPARKMemoryWriteRangeExpr()

// Create a Memory range expression set for all static, indirect or indexed memory reads in the loop.
//  Return true on success (at least one read expr collected), false on failure.
//  Outputs:
//   MemReadExprs - one expanded+simplified address expr per distinct static memory read;
//   MemReadExprWidths - for each new expr, its byte width plus an index into StackPtrCopiesVector
//    (-1 when expansion recorded no stack pointer copy addresses);
//   StackPtrCopiesVector - sets of stack-pointer-copy instruction addrs gathered by ExpandExpr();
//   LoopRegHashes - register hashes accumulated by ExpandExpr() when RecordLoopRegs is true.
//  Side effects: sets this->LoopReadsMemory[LoopIndex] and this->LoopReadsGlobalStaticMemory[LoopIndex].
bool SMPFunction::CreateSPARKMemoryReadRangeExprs(size_t LoopIndex, bool RecordLoopRegs, set<int> &LoopRegHashes, STARSExprSet &MemReadExprs, STARSMemWriteExprsList &MemReadExprWidths, vector<set<STARS_ea_t> > &StackPtrCopiesVector) {
	STARSExpression *MemoryRangeExpr = nullptr;

	// Iterate through loop instructions and find all static, indirect or indexed memory reads.
	list<size_t> LoopBlockList;
	this->BuildLoopBlockList(LoopIndex, LoopBlockList);
	bool success = false;
	bool OneFailure = false; // records any expr-creation/expansion failure (currently diagnostic only; see #if 0 below)
	int StackPtrCopiesVectorIndex = 0; // next unused slot in StackPtrCopiesVector
	// Triple: STARSExprSetIter, ByteWidth, index into vector of StackPtrCopiesVector
	uint16_t ByteWidth;

	for (size_t BlockNum : LoopBlockList) {
		SMPBasicBlock *CurrBlock = this->GetBlockByNum(BlockNum);
		if (CurrBlock->HasIndirectMemRead() || CurrBlock->HasStaticMemRead()) {
			this->LoopReadsMemory[LoopIndex] = true;
			if (CurrBlock->HasStaticMemRead()) {
				for (vector<SMPInstr *>::iterator InstIter = CurrBlock->GetFirstInst(); InstIter != CurrBlock->GetLastInst(); ++InstIter) {
					SMPInstr *CurrInst = (*InstIter);
					if (CurrInst->MDHasAnyRepeatPrefix() && CurrInst->MDIsPossibleStringLoopingOpcode()) {
						// Looping string opcodes are dealt with separately.
						continue;
					}
					if (CurrInst->HasStaticMemRead()) {
						STARS_ea_t InstAddr = CurrInst->GetAddr();
						// Clone the mem use operand so we can un-normalize it without disturbing the instruction.
						STARSOpndTypePtr MemUseOp = CurrInst->GetMemUse()->clone();
						CurrInst->MDGetUnnormalizedOp(MemUseOp);
						assert(MemUseOp->IsMemOp());

						// For supporting move_globals, we only care about true static memory operands
						//  with static offsets in the operand.
						STARS_ea_t Displacement = MemUseOp->GetAddr();
						if (!IsImmedGlobalAddress(Displacement))
							continue;

						// Start with the current memory addressing expression.
						MemoryRangeExpr = this->CreateMemoryAddressExpr(MemUseOp, CurrInst);
						if (nullptr == MemoryRangeExpr) {
							OneFailure = true; // failure
							SMP_msg("ERROR: CreateSPARKMemoryReadRangeExprs() abandoned due to CreateMemoryAddressExpr() failure at %llx.\n", (uint64_t)InstAddr);
							continue;
						}
						// Expand and simplify the addressing expression.
						MemoryRangeExpr->EvaluateConsts();
						bool ParentChanged = MemoryRangeExpr->SimplifyDriver();
						ByteWidth = MemUseOp->GetByteWidth();
						bool HasGlobalStaticMemOp = MemUseOp->IsStaticMemOp();
						this->LoopReadsGlobalStaticMemory[LoopIndex] = (HasGlobalStaticMemOp || this->LoopReadsGlobalStaticMemory[LoopIndex]);

#if 0	// seems SPARK oriented; does not appear useful for memory annotations
						if (MemoryRangeExpr->AreAllRegsLoopInvariant()) {
							// We need to place this memory write expr into the function-level non-loop expr container.
							//  This container is InArgsUsedInMemWrites, which should already have this expr, so there
							//  is no further work for us to do here. The exception is when the memory write expr is
							//  a global static mem write, which was not necessarily traced back to an InArg. We will
							//  store those in the InArgsUsedInMemWrites container.
							SMP_msg("INFO: Loop-invariant mem expr inside loop at %llx\n", InstAddr);
							if (!HasGlobalStaticMemOp) {
								continue;
							}
							else {
								pair<STARSExprSetIter, bool> InsertResult = this->InArgsUsedInMemWrites[LoopIndex + 1].insert(MemoryRangeExpr);
								if (InsertResult.second) {
									pair<STARSExprSetIter, size_t> InsertValue(InsertResult.first, ByteWidth);
									this->InArgsUsedInMemWriteByteWidths[LoopIndex + 1].push_back(InsertValue);
									// Fall through to expand and simplify code below
								}
							}
						}
#endif

						bool StoppedOnIV = false;
						bool changed = false;
						set<STARS_ea_t> StackPtrCopySet;
						int DepthCounter = 0;
						success = MemoryRangeExpr->ExpandExpr(MemoryRangeExpr->GetParentInst()->GetAddr(), LoopIndex, false, true, true, RecordLoopRegs, true, LoopRegHashes, StoppedOnIV, changed, StackPtrCopySet, DepthCounter);
						if (!success) {
							delete MemoryRangeExpr;
							MemoryRangeExpr = nullptr;
							// NOTE: message formerly said "CreateSPARKMemoryReadRangeExpr()" (missing the final 's'),
							//  which did not match this function's name; fixed for accurate log triage.
							SMP_msg("ERROR: CreateSPARKMemoryReadRangeExprs() error due to ExpandExpr() failure on loop %zu InstAddr %llx in func at %llx\n",
								LoopIndex, (uint64_t)InstAddr, (uint64_t) this->GetFirstFuncAddr());
							OneFailure = true; // failure
							continue;
						}
						MemoryRangeExpr->EvaluateConsts();
						ParentChanged = MemoryRangeExpr->SimplifyDriver();
						// Handle multiple memory reads in one loop by keeping a set and combining write exprs later.
						pair<STARSExprSetIter, bool> InsertResult = MemReadExprs.insert(MemoryRangeExpr);
						if (InsertResult.second) { // New expr in set
							int TempIndex = StackPtrCopiesVectorIndex;
							if (StackPtrCopySet.empty()) {
								TempIndex = -1; // don't need to push empty sets onto vector
							}
							else {
								StackPtrCopiesVector.push_back(StackPtrCopySet);
								++StackPtrCopiesVectorIndex;
							}
							pair<size_t, int> InsertPair2((size_t)ByteWidth, TempIndex);
							pair<STARSExprSetIter, pair<size_t, int> > InsertValue(InsertResult.first, InsertPair2);
							MemReadExprWidths.push_back(InsertValue);
							// StackPtrCopySet.clear();
						}
					}
				} // end for all instructions in block
			} // end if static memory read in current block
		} // end if static or indirect memory read in current block
	} // end for all blocks

#if 0	// not yet emitting SPARK annotations for memory reads
	if (global_STARS_program->ShouldSTARSTranslateToSPARKAda()) {
		// Combine exprs like (RDI+2) and (RDI+3) into a single expr of greater width for proof simplicity.
		this->CombineMemoryExprs(MemReadExprWidths, MemReadExprs, StackPtrCopiesVector);
	}
#endif

	success = (!MemReadExprs.empty());
#if 0	// for SPARK use only?
	if (OneFailure)
		this->LoopAnalysisProblems[LoopIndex] = true;
#endif

	return success;
} // end of SMPFunction::CreateSPARKMemoryReadRangeExprs()

// Create an expression tree for the memory address computation in MemDefOp, as used by WriteInst.
//  Returns a newly allocated STARSExpression (caller owns it), or nullptr on failure.
//  The tree is built from whichever of {scaled index reg, base reg, constant offset, segment reg}
//  are present in the operand; register leaves record the use address (WriteInst's addr) and the
//  SSA number found in WriteInst's USE list.
STARSExpression* SMPFunction::CreateMemoryAddressExpr(const STARSOpndTypePtr &MemDefOp, SMPInstr *WriteInst) {
	STARSExpression *MemoryAddressExpr = nullptr;
	STARS_ea_t WriteAddr = WriteInst->GetAddr();
	// Reject CS-relative (IP-relative) non-static operands up front; we return nullptr for those.
	if (MemDefOp->GetSegReg() == STARS_x86_R_cs && (!MemDefOp->IsStaticMemOp())) {
		SMP_msg("ERROR: CreateMemoryAddressExpr() fails on CS-relative MemDefOp at %llx\n", (uint64_t) WriteAddr);
		return MemoryAddressExpr; // cannot handle IP-relative memory writes, and should only happen for reads
	}

	// Decompose the operand into its addressing fields: [SegReg:] BaseReg + (IndexReg << ScaleFactor) + offset.
	int BaseReg, IndexReg;
	uint16_t ScaleFactor;
	STARS_ea_t offset;
	MDExtractAddressFields(MemDefOp, BaseReg, IndexReg, ScaleFactor, offset);

	// Construct an expression based on which address elements are present.
	STARS_regnum_t SegReg = MemDefOp->GetSegReg();
	bool HasIndexReg = (STARS_x86_R_none != IndexReg);
	bool IsScaled = HasIndexReg && (0 != ScaleFactor); // ScaleFactor is the log2 shift count; see (1 << ScaleFactor) below
	bool HasBaseReg = (STARS_x86_R_none != BaseReg);
	bool HasOffset = (0 != offset);
	bool HasSegReg = (SegReg != STARS_x86_R_none);
	bool IPRelative = (SegReg == MD_CODE_SEGMENT_REG);

	// Build canonicalized reg operands and find their USE entries so we can record SSA numbers.
	STARSOpndTypePtr IndexRegOp = nullptr;
	STARSDefUseIter IndexUseIter = WriteInst->GetLastUse(); // sentinel until (if) we find the real use
	if (HasIndexReg) {
		IndexRegOp = WriteInst->MakeRegOpnd((STARS_regnum_t) IndexReg);
		CanonicalizeOpnd(IndexRegOp);
		IndexUseIter = WriteInst->FindUse(IndexRegOp);
		assert(IndexUseIter != WriteInst->GetLastUse());
	}
	STARSOpndTypePtr BaseRegOp = nullptr;
	STARSDefUseIter BaseUseIter = WriteInst->GetLastUse(); // sentinel until (if) we find the real use
	if (HasBaseReg) {
		BaseRegOp = WriteInst->MakeRegOpnd((STARS_regnum_t) BaseReg);
		CanonicalizeOpnd(BaseRegOp);
		BaseUseIter = WriteInst->FindUse(BaseRegOp);
		assert(BaseUseIter != WriteInst->GetLastUse());
	}
	STARSOpndTypePtr OffsetOp = nullptr;
	if (HasOffset) {
		OffsetOp = WriteInst->MakeImmediateOpnd((STARS_uval_t) offset);
		OffsetOp->SetByteWidth(MemDefOp->GetByteWidth());
	}


	MemoryAddressExpr = new STARSExpression();
	MemoryAddressExpr->SetParentInst(WriteInst); // also sets OriginalParentInst
	// MemoryAddressExpr->SetOriginalParentInst(WriteInst);
	MemoryAddressExpr->SetParentFunc(this);
	// Case analysis over the present address elements. Each branch fills in the
	//  operator and left/right operands or subtrees of MemoryAddressExpr.
	if (IsScaled) { // has IndexReg and ScaleFactor
		if (HasBaseReg || HasOffset) {
			// Root operator should be SMP_ADD, right tree is scaled IndexReg, LeftTree is BaseReg and/or offset
			MemoryAddressExpr->SetOperator(SMP_ADD);
			// Right tree: IndexReg * (1 << ScaleFactor)
			STARSExpression *RightExpr = new STARSExpression();
			RightExpr->SetParentInst(WriteInst);
			RightExpr->SetOriginalParentInst(WriteInst);
			RightExpr->SetParentFunc(this);
			RightExpr->SetOperator(SMP_U_MULTIPLY);
			RightExpr->SetLeftOperand(IndexRegOp);
			RightExpr->SetLeftUseAddr(WriteAddr);
			RightExpr->SetLeftSSANum(IndexUseIter->GetSSANum());
			RightExpr->SetRightOperand(WriteInst->MakeImmediateOpnd((STARS_uval_t) (1 << ScaleFactor)));
			MemoryAddressExpr->SetRightTree(RightExpr);
			if (HasBaseReg && HasOffset) { // has both; need to add them together
				// Left tree: BaseReg + offset
				STARSExpression *LeftExpr = new STARSExpression();
				LeftExpr->SetParentInst(WriteInst);
				LeftExpr->SetOriginalParentInst(WriteInst);
				LeftExpr->SetParentFunc(this);
				LeftExpr->SetOperator(SMP_ADD);
				LeftExpr->SetLeftOperand(BaseRegOp);
				LeftExpr->SetLeftUseAddr(WriteAddr);
				LeftExpr->SetLeftSSANum(BaseUseIter->GetSSANum());
				LeftExpr->SetRightOperand(OffsetOp);
				LeftExpr->SetRightUseAddr(WriteAddr);
				MemoryAddressExpr->SetLeftTree(LeftExpr);
			}
			else { // BaseReg or offset, not both; just need a LeftOperand
				if (HasBaseReg) {
					MemoryAddressExpr->SetLeftOperand(BaseRegOp);
					MemoryAddressExpr->SetLeftUseAddr(WriteAddr);
					MemoryAddressExpr->SetLeftSSANum(BaseUseIter->GetSSANum());
				}
				else {
					MemoryAddressExpr->SetLeftOperand(OffsetOp);
					MemoryAddressExpr->SetLeftUseAddr(WriteAddr);
				}
			}
		}
		else { // scaled IndexReg, nothing else
			MemoryAddressExpr->SetOperator(SMP_U_MULTIPLY);
			MemoryAddressExpr->SetLeftOperand(IndexRegOp);
			MemoryAddressExpr->SetLeftUseAddr(WriteAddr);
			MemoryAddressExpr->SetLeftSSANum(IndexUseIter->GetSSANum());
			MemoryAddressExpr->SetRightOperand(WriteInst->MakeImmediateOpnd((STARS_uval_t)(1 << ScaleFactor)));
			MemoryAddressExpr->SetRightUseAddr(WriteAddr);
		}
	}
	else if (HasIndexReg) { // unscaled IndexReg
		if (HasBaseReg && HasOffset) { // has both; need to add them together: (BaseReg + offset) + IndexReg
			STARSExpression *LeftExpr = new STARSExpression();
			LeftExpr->SetParentInst(WriteInst); // also sets OriginalParentInst
			// LeftExpr->SetOriginalParentInst(WriteInst);
			LeftExpr->SetParentFunc(this);
			LeftExpr->SetOperator(SMP_ADD);
			LeftExpr->SetLeftOperand(BaseRegOp);
			LeftExpr->SetLeftUseAddr(WriteAddr);
			LeftExpr->SetLeftSSANum(BaseUseIter->GetSSANum());
			LeftExpr->SetRightOperand(OffsetOp);
			LeftExpr->SetRightUseAddr(WriteAddr);
			MemoryAddressExpr->SetOperator(SMP_ADD);
			MemoryAddressExpr->SetLeftTree(LeftExpr);
			MemoryAddressExpr->SetRightOperand(IndexRegOp);
			MemoryAddressExpr->SetRightUseAddr(WriteAddr);
			MemoryAddressExpr->SetRightSSANum(IndexUseIter->GetSSANum());
		}
		else { // BaseReg or offset, or neither, not both; just need a LeftOperand
			if (HasBaseReg) { // BaseReg + IndexReg
				MemoryAddressExpr->SetOperator(SMP_ADD);
				MemoryAddressExpr->SetLeftOperand(BaseRegOp);
				MemoryAddressExpr->SetLeftUseAddr(WriteAddr);
				MemoryAddressExpr->SetLeftSSANum(BaseUseIter->GetSSANum());
				MemoryAddressExpr->SetRightOperand(IndexRegOp);
				MemoryAddressExpr->SetRightUseAddr(WriteAddr);
				MemoryAddressExpr->SetRightSSANum(IndexUseIter->GetSSANum());
			}
			else if (HasOffset) {  // IndexReg + offset
				MemoryAddressExpr->SetOperator(SMP_ADD);
				MemoryAddressExpr->SetLeftOperand(IndexRegOp);
				MemoryAddressExpr->SetLeftUseAddr(WriteAddr);
				MemoryAddressExpr->SetLeftSSANum(IndexUseIter->GetSSANum());
				MemoryAddressExpr->SetRightOperand(OffsetOp);
				MemoryAddressExpr->SetRightUseAddr(WriteAddr);
			}
			else { // neither BaseReg nor offset, just IndexReg
				MemoryAddressExpr->SetOperator(SMP_ASSIGN); // signal that only LeftOperand is valid
				MemoryAddressExpr->SetLeftOperand(IndexRegOp);
				MemoryAddressExpr->SetLeftUseAddr(WriteAddr);
				MemoryAddressExpr->SetLeftSSANum(IndexUseIter->GetSSANum());
			}
		}
	}
	else if (HasBaseReg) { // BaseReg, no IndexReg
		if (HasOffset) { // BaseReg + offset
			MemoryAddressExpr->SetOperator(SMP_ADD);
			MemoryAddressExpr->SetLeftOperand(BaseRegOp);
			MemoryAddressExpr->SetLeftUseAddr(WriteAddr);
			MemoryAddressExpr->SetLeftSSANum(BaseUseIter->GetSSANum());
			MemoryAddressExpr->SetRightOperand(OffsetOp);
			MemoryAddressExpr->SetRightUseAddr(WriteAddr);
		}
		else { // just BaseReg
			MemoryAddressExpr->SetOperator(SMP_ASSIGN); // signal that only LeftOperand is valid
			MemoryAddressExpr->SetLeftOperand(BaseRegOp);
			MemoryAddressExpr->SetLeftUseAddr(WriteAddr);
			MemoryAddressExpr->SetLeftSSANum(BaseUseIter->GetSSANum());
		}
	}
	else { // just offset or SegReg or SegReg+offset
		assert(HasOffset || HasSegReg);
		MemoryAddressExpr->SetOperator(SMP_ASSIGN); // signal that only LeftOperand is valid

		if (HasOffset && HasSegReg && (!IPRelative)) {
			// SegReg + offset (non-IP-relative): model the segment register as an added term.
			MemoryAddressExpr->SetOperator(SMP_ADD);
			STARSOpndTypePtr SegRegOp = WriteInst->MakeRegOpnd(MemDefOp->GetSegReg());
			MemoryAddressExpr->SetLeftOperand(SegRegOp);
			MemoryAddressExpr->SetLeftUseAddr(WriteAddr);
			MemoryAddressExpr->SetRightOperand(OffsetOp);
			MemoryAddressExpr->SetRightUseAddr(WriteAddr);
		}
		else if (HasOffset) {
			// Pure static address: the offset is the whole expression.
			MemoryAddressExpr->SetLeftOperand(OffsetOp);
			MemoryAddressExpr->SetLeftUseAddr(WriteAddr);
		}
		else { // only seg reg
			STARSOpndTypePtr SegRegOp = WriteInst->MakeRegOpnd(MemDefOp->GetSegReg());
			MemoryAddressExpr->SetLeftOperand(SegRegOp);
			MemoryAddressExpr->SetLeftUseAddr(WriteAddr);
		}
	}

	return MemoryAddressExpr;
} // end of SMPFunction::CreateMemoryAddressExpr()

// Utility function to find an iterator in a vector of iterators.
bool IsIterInVector(STARSMemWriteExprListIter ListIter, vector<STARSMemWriteExprListIter> IterVector) {
	bool Found = false;
	for (size_t i = 0; i < IterVector.size(); ++i) {
		if (IterVector[i] == ListIter) {
			Found = true;
			break;
		}
	}
	return Found;
} // end of IsIterInVector()

// Combine items in the expr set that have common regs, e.g. (RDI+1) and (RDI-2), into fewer set entries.
//  On return, if any combining happened, MemWriteExprs and MemWriteExprWidths are rewritten in place
//  to hold the combined exprs with widened byte-width ranges; StackPtrCopies entries may be merged.
void SMPFunction::CombineMemoryExprs(STARSMemWriteExprsList &MemWriteExprWidths, STARSExprSet &MemWriteExprs, vector<set<STARS_ea_t> > &StackPtrCopies) {
	if (1 >= MemWriteExprWidths.size())
		return; // cannot combine exprs unless we have two or more

	// Start with the first list item and scan for overlapping memory exprs.
	//  Restrict to simple assignment exprs like [RDI+1] and [RDI-1], not relational exprs like [RDI+RCX+1].
	//  Implement a methodical, simple search for matching exprs, combine exprs, repeat to end of list.
	STARSMemWriteExprsList NewMemWriteExprWidths;
	STARSExprSet NewExprSet; // hold combined exprs until we are ready to re-write the whole set
	STARSMemWriteExprListIter FirstListIter = MemWriteExprWidths.begin();
	vector<STARSMemWriteExprListIter> ItersToDiscard; // iters that match FirstIter
	while (FirstListIter != MemWriteExprWidths.end()) {
		STARSMemWriteExprListIter SecondListIter = FirstListIter;
		// Advance SecondListIter to the next iter location that is not already marked to be discarded.
		do {
			++SecondListIter;
		} while ((SecondListIter != MemWriteExprWidths.end()) && IsIterInVector(SecondListIter, ItersToDiscard));
		STARSExprSetIter FirstSetIter = (*FirstListIter).first;
		bool MatchSeen = false;
		STARS_sval_t MinOffset = 0, MaxOffset = 0; // MaxOffset is one beyond the last byte offset written
		STARS_sval_t FirstOffset; // save the ImmedValue of the FirstSetIter expr; assigned before use, guarded by MatchSeen
		int FirstStackPtrCopiesVecIndex = (*FirstListIter).second.second;
		// Inner scan: compare every later list entry against the FirstListIter expr.
		while (SecondListIter != MemWriteExprWidths.end()) {
			STARSExprSetIter SecondSetIter = (*SecondListIter).first;
			if ((*FirstSetIter)->IsEqualExprExceptingImmedOpnds(*SecondSetIter)) {
				// Same base expr, differing only in immediate offsets; mark for merging.
				ItersToDiscard.push_back(SecondListIter);
				// Keep track of minimum and maximum offsets seen from address reg.
				STARS_uval_t ImmedValue = 0;
				if (!MatchSeen) { // SecondSetIter is first match we have seen
					// Initialize MinOffset and MaxOffset
					if ((*FirstSetIter)->ExtractImmedValue(ImmedValue)) {
						STARS_sval_t SignedImmedValue = (STARS_sval_t) ImmedValue;
						MinOffset = SignedImmedValue;
						MaxOffset = SignedImmedValue + (STARS_sval_t) (*FirstListIter).second.first;
					}
					else {
						// No immediate in the first expr => treat its offset as zero.
						MinOffset = 0;
						MaxOffset = (STARS_sval_t)(*FirstListIter).second.first; // the size
					}
					FirstOffset = MinOffset;
					MatchSeen = true;
				}
				// See if SecondSetIter's expr has an extension of the min-max range.
				if ((*SecondSetIter)->ExtractImmedValue(ImmedValue)) {
					STARS_sval_t SignedImmedValue = (STARS_sval_t) ImmedValue;
					STARS_sval_t UpperBound = SignedImmedValue + (STARS_sval_t)(*SecondListIter).second.first; // offset + size
					if (SignedImmedValue < MinOffset)
						MinOffset = SignedImmedValue;
					if (UpperBound > MaxOffset)
						MaxOffset = UpperBound;
				}
				else { // no immediate value => offset of zero
					STARS_sval_t UpperBound = (STARS_sval_t) (*SecondListIter).second.first; // offset zero + size
					if (UpperBound > MaxOffset)
						MaxOffset = UpperBound;
				}
			} // end if ((*FirstSetIter)->IsEqualExprExceptingImmedOpnds(*SecondSetIter)) 

			// NOTE(review): this stack-ptr-copy merge runs for EVERY SecondListIter, even when the
			//  exprs did not match above; confirm whether the merge should be inside the match branch.
			int SecondStackPtrCopiesVecIndex = (*SecondListIter).second.second;
			if (0 <= SecondStackPtrCopiesVecIndex) { // non-empty set of stack ptr copy addrs
				// Merge stack ptr copy set into first set
				if (0 <= FirstStackPtrCopiesVecIndex) { // first set is also non-empty
					set<STARS_ea_t>::iterator SecondCopiesIter = StackPtrCopies[SecondStackPtrCopiesVecIndex].begin();
					while (SecondCopiesIter != StackPtrCopies[SecondStackPtrCopiesVecIndex].end()) {
						STARS_ea_t CopyAddr = (*SecondCopiesIter);
						StackPtrCopies[FirstStackPtrCopiesVecIndex].insert(CopyAddr);
						++SecondCopiesIter;
					}
				}
				else { // first set empty, second set non-empty
					// Just use the second set
					FirstStackPtrCopiesVecIndex = SecondStackPtrCopiesVecIndex;
					(*FirstListIter).second.second = FirstStackPtrCopiesVecIndex;
				}
			}
			++SecondListIter;
		} // end while (SecondListIter != MemWriteExprWidths.end()) 
		if (MatchSeen) {
			// We need to broaden the range of the FirstListIter mem write, and discard the matching iters.
			STARSExpression *NewFirstExpr = nullptr;
			if (MinOffset != FirstOffset) {
				// Cloned expr needs its ImmedValue changed to MinOffset.
				if (FirstOffset == 0) {
					// Original expr probably had no immediate operand. Create one.
					NewFirstExpr = new STARSExpression();
					// Make left tree be the original expr
					NewFirstExpr->SetLeftTree((*FirstSetIter)->Clone());
					NewFirstExpr->SetParentFunc(this);
					NewFirstExpr->SetParentInst(NewFirstExpr->GetLeftTree()->GetParentInst());
					NewFirstExpr->SetOriginalParentInst(NewFirstExpr->GetParentInst());
					NewFirstExpr->SetLeftPreLoopDefAddr(NewFirstExpr->GetLeftTree()->GetLeftPreLoopDefAddr());
					if (0 > MinOffset) {
						NewFirstExpr->SetOperator(SMP_SUBTRACT);
						// Make right operand be a new ImmedOp with value == absolute value of MinOffset.
						STARS_sval_t AbsValue = (0 - MinOffset);
						NewFirstExpr->SetRightOperand(NewFirstExpr->GetParentInst()->MakeImmediateOpnd((STARS_uval_t) AbsValue));
					}
					else { // 0 < MinOffset
						NewFirstExpr->SetOperator(SMP_ADD);
						// Make right operand be a new ImmedOp with value == MinOffset.
						NewFirstExpr->SetRightOperand(NewFirstExpr->GetParentInst()->MakeImmediateOpnd((STARS_uval_t) MinOffset));
					}
				}
				else { // FirstOffset != 0
					// First expr had an immediate operand; find it and change its value.
					NewFirstExpr = (*FirstSetIter)->Clone();
					bool UpdatedImmed = NewFirstExpr->UpdateImmedValue((STARS_uval_t) FirstOffset, (STARS_uval_t) MinOffset);
					assert(UpdatedImmed);
				}
			}
			else { // MinOffset == FirstOffset
				// Just need to clone first expr and set width to new range
				NewFirstExpr = (*FirstSetIter)->Clone();
			}
			// NewFirstExpr now has the correct MinOffset.
			pair<STARSExprSetIter, bool> InsertResult = NewExprSet.insert(NewFirstExpr);
			assert(InsertResult.second); // All exprs that are equal except for offsets are separated and unique
			size_t NewRange = ((size_t) (MaxOffset - MinOffset));
			pair<size_t, int> InsertPair2(NewRange, FirstStackPtrCopiesVecIndex);
			pair<STARSExprSetIter, pair<size_t, int> > InsertValue(InsertResult.first, InsertPair2);
			NewMemWriteExprWidths.push_back(InsertValue);
		} // end if (MatchSeen)
		else { // !MatchSeen
			// FirstSetIter expr was unique.
			pair<STARSExprSetIter, bool> InsertResult = NewExprSet.insert(*FirstSetIter); // keep old expr
			assert(InsertResult.second); // All exprs that are equal except for offsets are separated and unique
			size_t OldRange = (*FirstListIter).second.first; // keep the old width
			int OldCopiesVecIndex = (*FirstListIter).second.second; // keep the old index
			pair<size_t, int> InsertPair2(OldRange, OldCopiesVecIndex);
			pair<STARSExprSetIter, pair<size_t, int> > InsertValue(InsertResult.first, InsertPair2);
			NewMemWriteExprWidths.push_back(InsertValue);
		}
		// Now move on to the next unique base expr.
		do {
			++FirstListIter;
		} while ((FirstListIter != MemWriteExprWidths.end()) && IsIterInVector(FirstListIter, ItersToDiscard));
	} // end while ((FirstListIter != MemWriteExprWidths.end()) && (1 < MemWriteExprWidths.size()))
	
	// If we combined any exprs, replace our arguments with the new expr set and new list of widths and ExprSetIters.
	if (MemWriteExprWidths.size() != NewMemWriteExprWidths.size()) {
		// Some combining happened. Copy new set and new list to arguments.
		assert(MemWriteExprWidths.size() > NewMemWriteExprWidths.size());
		MemWriteExprs.clear();
		MemWriteExprs.insert(NewExprSet.begin(), NewExprSet.end());
		MemWriteExprWidths.clear();
		// Synchronize the iters in MemWriteExprWidths to the new MemWriteExprs locations.
		STARSMemWriteExprsList::const_iterator NewListIter;
		for (NewListIter = NewMemWriteExprWidths.cbegin(); NewListIter != NewMemWriteExprWidths.cend(); ++NewListIter) {
			STARSExprSetIter NewSetIter = (*NewListIter).first;
			size_t NewWidth = (*NewListIter).second.first;
			// Find the matching expr that we just inserted into MemWriteExprs.
			STARSExprSetIter OldSetIter = MemWriteExprs.find(*NewSetIter);
			assert(OldSetIter != MemWriteExprs.end());
			pair<size_t, int> InsertPair2(NewWidth, (*NewListIter).second.second);
			pair<STARSExprSetIter, pair<size_t, int> > InsertValue(OldSetIter, InsertPair2);
			MemWriteExprWidths.push_back(InsertValue);
		}
	}

	return;
} // end of SMPFunction::CombineMemoryExprs()

// Replace occurrences of IVFamily.BIV in CurrExpr with BIV InitExpr or LimitExpr.
//  InitCase true => substitute this->LoopIterationsInitExprs[LoopIndex];
//  InitCase false => substitute the right side of this->LoopIterationsLimitExprs[LoopIndex].
//  CurrExpr is mutated in place; returns true if at least one replacement was made
//  anywhere in the tree (left and right sides are handled symmetrically below).
bool SMPFunction::ReplaceBIVWithExpr(std::size_t LoopIndex, const struct InductionVarFamily &IVFamily, STARSExpression *CurrExpr, bool InitCase) {
	bool success = false;

	// Traverse CurrExpr to find the BIV from IVFamily.
	// Left side: either a leaf operand to test for a BIV match, or a subtree to recurse into.
	if (!CurrExpr->HasLeftSubTree()) {
		// Leaf: a match requires both the SSA number and the operand itself to be equal.
		if ((CurrExpr->GetLeftSSANum() == IVFamily.BasicInductionVar.InductionVar.GetSSANum())
			&& IsEqOp(CurrExpr->GetConstLeftOperand(), IVFamily.BasicInductionVar.InductionVar.GetOp())) {
			// Matched. Replace CurrExpr->LeftOperand with InitExpr or LimitExpr->GetRightTree().
			//  For LimitExpr, the LeftOperand is compared to the RightOperand or RightTree, so the right side is the limit.
			if (InitCase) {
				// Graft a clone of the loop's init expr in place of the leaf operand.
				STARSExpression *ReplaceExpr = this->LoopIterationsInitExprs[LoopIndex];
				STARSExpression *FinalExpr = ReplaceExpr->Clone();
				CurrExpr->SetLeftTree(FinalExpr);
				CurrExpr->SetLeftOperand(nullptr); // operand superseded by the new subtree
			}
			else { // LimitExpr case
				if (this->LoopIterationsLimitExprs[LoopIndex]->HasRightSubTree()) {
					STARSExpression *ReplaceExpr = this->LoopIterationsLimitExprs[LoopIndex]->GetRightTree();
					STARSExpression *FinalExpr = ReplaceExpr->Clone();
					CurrExpr->SetLeftTree(FinalExpr);
					CurrExpr->SetLeftOperand(nullptr); // operand superseded by the new subtree
				}
				else { // just a RightOperand, so replace CurrExpr->LeftOperand with LimitExpr->RightOperand
					// Copy operand plus its use addr, SSA number, and pre-loop def addr.
					CurrExpr->SetLeftOperand(this->LoopIterationsLimitExprs[LoopIndex]->GetRightOperand());
					CurrExpr->SetLeftUseAddr(this->LoopIterationsLimitExprs[LoopIndex]->GetRightUseAddr());
					CurrExpr->SetLeftSSANum(this->LoopIterationsLimitExprs[LoopIndex]->GetRightSSANum());
					CurrExpr->SetLeftPreLoopDefAddr(this->LoopIterationsLimitExprs[LoopIndex]->GetRightPreLoopDefAddr());
				}
			}
			success = true;
		}
	}
	else {
		// success is still false here, so plain assignment is equivalent to |=.
		success = this->ReplaceBIVWithExpr(LoopIndex, IVFamily, CurrExpr->GetLeftTree(), InitCase); // recurse
	}
	// Right side: mirror of the left-side logic above.
	if (!CurrExpr->HasRightSubTree()) {
		if ((CurrExpr->GetRightSSANum() == IVFamily.BasicInductionVar.InductionVar.GetSSANum())
			&& IsEqOp(CurrExpr->GetConstRightOperand(), IVFamily.BasicInductionVar.InductionVar.GetOp())) {
			// Matched. Replace CurrExpr->RightOperand with InitExpr or LimitExpr->GetRightTree() or LimitExpr->GetRightOperand().
			//  For LimitExpr, the LeftOperand is compared to the RightOperand or RightTree, so the right side is the limit.
			if (InitCase) {
				STARSExpression *ReplaceExpr = this->LoopIterationsInitExprs[LoopIndex];
				STARSExpression *FinalExpr = ReplaceExpr->Clone();
				CurrExpr->SetRightTree(FinalExpr);
				CurrExpr->SetRightOperand(nullptr); // operand superseded by the new subtree
			}
			else { // LimitExpr case
				if (this->LoopIterationsLimitExprs[LoopIndex]->HasRightSubTree()) {
					STARSExpression *ReplaceExpr = this->LoopIterationsLimitExprs[LoopIndex]->GetRightTree();
					STARSExpression *FinalExpr = ReplaceExpr->Clone();
					CurrExpr->SetRightTree(FinalExpr);
					CurrExpr->SetRightOperand(nullptr); // operand superseded by the new subtree
				}
				else { // just a RightOperand, so replace CurrExpr->RightOperand with LimitExpr->RightOperand
					CurrExpr->SetRightOperand(this->LoopIterationsLimitExprs[LoopIndex]->GetRightOperand());
					CurrExpr->SetRightUseAddr(this->LoopIterationsLimitExprs[LoopIndex]->GetRightUseAddr());
					CurrExpr->SetRightSSANum(this->LoopIterationsLimitExprs[LoopIndex]->GetRightSSANum());
					CurrExpr->SetRightPreLoopDefAddr(this->LoopIterationsLimitExprs[LoopIndex]->GetRightPreLoopDefAddr());
				}
			}
			success = true;
		}
	}
	else {
		// |= so a right-side recursion cannot erase a left-side success.
		success |= this->ReplaceBIVWithExpr(LoopIndex, IVFamily, CurrExpr->GetRightTree(), InitCase); // recurse
	}

	return success;
} // end of SMPFunction::ReplaceBIVWithExpr()

// Replace all BIVs in CurrExpr with lower or upper limit (depending on InitCase) exprs for BIV.
//  LoopIndex: loop whose induction-variable families are consulted.
//  CurrExpr:  expression tree rewritten in place (left side first, then right side).
//  InitCase:  true => substitute BIVInitExpr (lower bound); false => substitute the limit side of BIVLimitExpr.
//  changed:   set true whenever any substitution is performed.
//  Returns false on the first failure (null Init/Limit expr, or SSA-number mismatch for a matched operand).
bool SMPFunction::ReplaceAllBIVsWithExprs(std::size_t LoopIndex, STARSExpression *CurrExpr, bool InitCase, bool &changed) {
	bool success = true;
	bool UseFP = this->UsesFramePointer();
	size_t FamilyIndex;
	STARSInductionVarFamilyIter ListIter;

	// Traverse CurrExpr to find the induction vars.
	if (!CurrExpr->HasLeftSubTree()) {
		const STARSOpndTypePtr CurrOp = CurrExpr->GetConstLeftOperand();
		if (MDIsDataFlowOpnd(CurrOp, UseFP)) {
			SMPInstr *UseInst = CurrExpr->GetParentInst();
			int CurrSSANum = CurrExpr->GetLeftSSANum();
			int IVLoopIndex = -1;
			if (this->IsLoopNestInductionVar(CurrOp, UseInst, ListIter, FamilyIndex, IVLoopIndex)) {
				if (0 == FamilyIndex) { // BIV, not DIV
					struct InductionVarFamily IVFamily = (*ListIter);
					if ((CurrSSANum == IVFamily.BasicInductionVar.InductionVar.GetSSANum())
						&& IsEqOp(CurrOp, IVFamily.BasicInductionVar.InductionVar.GetOp())) {
						// Matched. Replace CurrExpr->LeftOperand with InitExpr or LimitExpr->GetRightTree()
						//  from the BIV matched.
						//  For LimitExpr, the LeftOperand is compared to the RightOperand or RightTree, so the right side is the limit.
						if (InitCase) {
							if (nullptr == IVFamily.BIVInitExpr) {
								assert(0 <= IVLoopIndex);
								success = false;
								SMP_msg("ERROR: null IVFamily.BIVInitExpr: IVFamily Dump follows.\n");
								DumpInductionVarFamily(IVFamily);
							}
							else {
								STARSExpression *ReplaceExpr = IVFamily.BIVInitExpr;

								// The InitExpr for an IV can be part of the InitExpr
								//  for a memory access. Avoid infinite recursion by seeing
								//  if we have matching exprs.
								if ((*ReplaceExpr < *CurrExpr) || (*CurrExpr < *ReplaceExpr)) {
									STARSExpression *FinalExpr = ReplaceExpr->Clone();
									CurrExpr->SetLeftTree(FinalExpr);
									CurrExpr->SetLeftOperand(nullptr);
									CurrExpr->SetParentInst(FinalExpr->GetParentInst());
									changed = true;
									success = this->ReplaceAllBIVsWithExprs(LoopIndex, CurrExpr->GetLeftTree(), InitCase, changed); // recurse
								}
								else { // Exprs are equal
									success = true; // no more replacing can be done
								}
							}
						}
						else { // LimitExpr case
							if (nullptr == IVFamily.BIVLimitExpr) {
								assert(0 <= IVLoopIndex);
								success = false;
								SMP_msg("ERROR: null IVFamily.BIVLimitExpr: IVFamily Dump follows.\n");
								DumpInductionVarFamily(IVFamily);
							}
							else {
								// NOTE: There are two cases. If BIVLimitExpr is for the BIV that terminates
								//  the loop, then it will have a relational operator, e.g. RCX < k. If BIVLimitExpr
								//  is for a secondary BIV, it will have the form RSI + k, which means "incoming value
								//  of RSI plus k," where we need the InitExpr for RSI to determine the actual limit.
								if (IsRelationalOperator(IVFamily.BIVLimitExpr->GetOperator())) {
									if (IVFamily.BIVLimitExpr->HasRightSubTree()) {
										STARSExpression *ReplaceExpr = IVFamily.BIVLimitExpr->GetRightTree();

										// The LimitExpr for an IV can be part of the LimitExpr
										//  for a memory access. Avoid infinite recursion by seeing
										//  if we have matching exprs.
										if ((*ReplaceExpr < *CurrExpr) || (*CurrExpr < *ReplaceExpr)) {
											STARSExpression *FinalExpr = ReplaceExpr->Clone();
											CurrExpr->SetLeftTree(FinalExpr);
											CurrExpr->SetLeftOperand(nullptr);
											CurrExpr->SetParentInst(FinalExpr->GetParentInst());
											changed = true;
											success = this->ReplaceAllBIVsWithExprs(LoopIndex, CurrExpr->GetLeftTree(), InitCase, changed); // recurse
										}
										else { // Exprs are equal
											success = true; // no more replacing can be done
										}
									}
									else { // just a RightOperand, so replace CurrExpr->LeftOperand with BIVLimitExpr->RightOperand
										bool SameOpnds = ((CurrExpr->GetLeftSSANum() == IVFamily.BIVLimitExpr->GetRightSSANum())
											&& IsEqOp(CurrExpr->GetConstLeftOperand(), IVFamily.BIVLimitExpr->GetConstRightOperand()));
										if (!SameOpnds) {
											CurrExpr->SetLeftOperand(IVFamily.BIVLimitExpr->GetRightOperand());
											CurrExpr->SetLeftUseAddr(IVFamily.BIVLimitExpr->GetRightUseAddr());
											CurrExpr->SetLeftSSANum(IVFamily.BIVLimitExpr->GetRightSSANum());
											CurrExpr->SetLeftPreLoopDefAddr(IVFamily.BIVLimitExpr->GetRightPreLoopDefAddr());
											success = true;
											CurrExpr->SetParentInst(IVFamily.BIVLimitExpr->GetParentInst());
											changed = true;
										}
										else { // Opnds are equal
											success = true; // no more replacing can be done
										}
									}
								}
								else {
									// Secondary BIV case. Simplest approach is to get the BIVInitExpr
									//  and use it to replace the left operand as a new left tree.
									STARSExpression *ReplaceExpr = IVFamily.BIVInitExpr;

									if ((*ReplaceExpr < *CurrExpr) || (*CurrExpr < *ReplaceExpr)) {
										STARSExpression *FinalExpr = ReplaceExpr->Clone();
										CurrExpr->SetLeftTree(FinalExpr);
										CurrExpr->SetLeftOperand(nullptr);
										CurrExpr->SetParentInst(FinalExpr->GetParentInst());
										changed = true;
										success = this->ReplaceAllBIVsWithExprs(LoopIndex, CurrExpr->GetLeftTree(), InitCase, changed); // recurse
									}
									else { // Exprs are equal
										success = true; // no more replacing can be done
									}
								}
							}
						} // end if (InitCase) ... else ...
					}
					else {
						// Alternative: Should we not insist on SSANum match? !!!!****!!!!
						success = false;
						SMP_msg("ERROR: CurrSSANum = %d does not match; IVFamily dump follows.\n", CurrSSANum);
						DumpInductionVarFamily(IVFamily);
					}
				}
			}
		}
	}
	else {
		success = this->ReplaceAllBIVsWithExprs(LoopIndex, CurrExpr->GetLeftTree(), InitCase, changed); // recurse
	}
	if (!success)
		return success;

	// Right side: same logic as the left side above, operating on the right operand/subtree.
	if (!CurrExpr->HasRightSubTree()) {
		const STARSOpndTypePtr CurrOp = CurrExpr->GetConstRightOperand();
		if (MDIsDataFlowOpnd(CurrOp, UseFP)) {
			SMPInstr *UseInst = CurrExpr->GetParentInst();
			int CurrSSANum = CurrExpr->GetRightSSANum();
			int IVLoopIndex = -1;
			if (this->IsLoopNestInductionVar(CurrOp, UseInst, ListIter, FamilyIndex, IVLoopIndex)) {
				if (0 == FamilyIndex) { // BIV, not DIV
					struct InductionVarFamily IVFamily = (*ListIter);
					if ((CurrSSANum == IVFamily.BasicInductionVar.InductionVar.GetSSANum())
						&& IsEqOp(CurrOp, IVFamily.BasicInductionVar.InductionVar.GetOp())) {
						// Matched. Replace CurrExpr->RightOperand with InitExpr or LimitExpr->GetRightTree() or LimitExpr->GetRightOperand().
						//  For LimitExpr, the LeftOperand is compared to the RightOperand or RightTree, so the right side is the limit.
						if (InitCase) {
							if (nullptr == IVFamily.BIVInitExpr) {
								assert(0 <= IVLoopIndex);
								success = false;
								SMP_msg("ERROR: null IVFamily.BIVInitExpr: IVFamily Dump follows.\n");
								DumpInductionVarFamily(IVFamily);
							}
							else {
								STARSExpression *ReplaceExpr = IVFamily.BIVInitExpr;

								// The InitExpr for an IV can be part of the InitExpr
								//  for a memory access. Avoid infinite recursion by seeing
								//  if we have matching exprs.
								if ((*ReplaceExpr < *CurrExpr) || (*CurrExpr < *ReplaceExpr)) {
									STARSExpression *FinalExpr = ReplaceExpr->Clone();
									CurrExpr->SetRightTree(FinalExpr);
									CurrExpr->SetRightOperand(nullptr);
									CurrExpr->SetParentInst(FinalExpr->GetParentInst());
									changed = true;
									success = this->ReplaceAllBIVsWithExprs(LoopIndex, CurrExpr->GetRightTree(), InitCase, changed); // recurse
								}
								else { // Exprs are equal
									success = true; // no more replacing can be done
								}
							}
						}
						else if (nullptr == IVFamily.BIVLimitExpr) { // LimitExpr case, safeguard
							assert(0 <= IVLoopIndex);
							success = false;
							SMP_msg("ERROR: null IVFamily.BIVLimitExpr: IVFamily Dump follows.\n");
							DumpInductionVarFamily(IVFamily);
						}
						else { // LimitExpr case, safe to proceed
							// NOTE: There are two cases. If BIVLimitExpr is for the BIV that terminates
							//  the loop, then it will have a relational operator, e.g. RCX < k. If BIVLimitExpr
							//  is for a secondary BIV, it will have the form RSI + k, which means "incoming value
							//  of RSI plus k," where we need the InitExpr for RSI to determine the actual limit.
							if (IsRelationalOperator(IVFamily.BIVLimitExpr->GetOperator())) {
								if (IVFamily.BIVLimitExpr->HasRightSubTree()) {
									STARSExpression *ReplaceExpr = IVFamily.BIVLimitExpr->GetRightTree();

									// The LimitExpr for an IV can be part of the LimitExpr
									//  for a memory access. Avoid infinite recursion by seeing
									//  if we have matching exprs.
									if ((*ReplaceExpr < *CurrExpr) || (*CurrExpr < *ReplaceExpr)) {
										STARSExpression *FinalExpr = ReplaceExpr->Clone();
										CurrExpr->SetRightTree(FinalExpr);
										CurrExpr->SetRightOperand(nullptr);
										CurrExpr->SetParentInst(FinalExpr->GetParentInst());
										changed = true;
										success = this->ReplaceAllBIVsWithExprs(LoopIndex, CurrExpr->GetRightTree(), InitCase, changed); // recurse
									}
									else { // Exprs are equal
										success = true; // no more replacing can be done
									}
								}
								else { // just a RightOperand, so replace CurrExpr->RightOperand with BIVLimitExpr->RightOperand
									bool SameOpnds = ((CurrExpr->GetRightSSANum() == IVFamily.BIVLimitExpr->GetRightSSANum())
										&& IsEqOp(CurrExpr->GetConstRightOperand(), IVFamily.BIVLimitExpr->GetConstRightOperand()));
									if (!SameOpnds) {
										CurrExpr->SetRightOperand(IVFamily.BIVLimitExpr->GetRightOperand());
										CurrExpr->SetRightUseAddr(IVFamily.BIVLimitExpr->GetRightUseAddr());
										CurrExpr->SetRightSSANum(IVFamily.BIVLimitExpr->GetRightSSANum());
										CurrExpr->SetRightPreLoopDefAddr(IVFamily.BIVLimitExpr->GetRightPreLoopDefAddr());
										CurrExpr->SetParentInst(IVFamily.BIVLimitExpr->GetParentInst());
										changed = true;
										success = true;
									}
									else { // Opnds are equal
										success = true; // no more replacing can be done
									}
								}
							}
							else {
								// Secondary BIV case. Simplest approach is to get the BIVInitExpr
								//  and use it to replace the right operand as a new right tree.
								STARSExpression *ReplaceExpr = IVFamily.BIVInitExpr;

								// The InitExpr for an IV can be part of the InitExpr
								//  for a memory access. Avoid infinite recursion by seeing
								//  if we have matching exprs.
								if ((*ReplaceExpr < *CurrExpr) || (*CurrExpr < *ReplaceExpr)) {
									STARSExpression *FinalExpr = ReplaceExpr->Clone();
									CurrExpr->SetRightTree(FinalExpr);
									CurrExpr->SetRightOperand(nullptr);
									CurrExpr->SetParentInst(FinalExpr->GetParentInst());
									changed = true;
									success = this->ReplaceAllBIVsWithExprs(LoopIndex, CurrExpr->GetRightTree(), InitCase, changed); // recurse
								}
								else { // Exprs are equal
									success = true; // no more replacing can be done
								}
							}
						} // end if (InitCase) ... else ...
					}
					else {
						// Alternative: Should we not insist on SSANum match? !!!!****!!!!
						success = false;
						// Fixed: CurrSSANum argument was missing for the %d conversion below (undefined behavior).
						SMP_msg("ERROR: CurrSSANum = %d does not match; IVFamily dump follows.\n", CurrSSANum);
						DumpInductionVarFamily(IVFamily);
					}
				}
			}
		}
	}
	else {
		success = this->ReplaceAllBIVsWithExprs(LoopIndex, CurrExpr->GetRightTree(), InitCase, changed); // recurse
	}

	return success;
} // end of SMPFunction::ReplaceAllBIVsWithExprs()

// Replace all DIVs in CurrExpr with lower or upper limit (depending on InitCase) exprs for DIV.
//  LoopIndex: loop whose induction-variable families are consulted.
//  CurrExpr:  expression tree rewritten in place (left side first, then right side).
//  InitCase:  true => substitute DIVInitExpr; false => substitute DIVLimitExpr.
//  changed:   set true whenever any substitution is performed.
//  Returns false on failure (null Init/Limit expr, or SSA-number mismatch for a matched DIV operand).
//  NOTE(review): unlike ReplaceAllBIVsWithExprs(), the failure paths here emit no SMP_msg
//   diagnostics — presumably intentional; confirm.
bool SMPFunction::ReplaceAllDIVsWithExprs(std::size_t LoopIndex, STARSExpression *CurrExpr, bool InitCase, bool &changed) {
	bool success = true;
	bool UseFP = this->UsesFramePointer();
	size_t FamilyIndex;
	STARSInductionVarFamilyIter ListIter;

	// Traverse CurrExpr to find the induction vars.
	if (!CurrExpr->HasLeftSubTree()) {
		STARSOpndTypePtr CurrOp = CurrExpr->GetLeftOperand();
		if (MDIsDataFlowOpnd(CurrOp, UseFP)) {
			SMPInstr *UseInst = CurrExpr->GetParentInst();
			int CurrSSANum = CurrExpr->GetLeftSSANum();
			int IVLoopIndex = -1;
			if (this->IsLoopNestInductionVar(CurrOp, UseInst, ListIter, FamilyIndex, IVLoopIndex)) {
				if (0 < FamilyIndex) { // DIV, not BIV
					struct InductionVarFamily IVFamily = (*ListIter);
					// FamilyIndex 0 is the BIV; DIVs are stored starting at vector index 0, hence the -1.
					size_t DIVIndex = (FamilyIndex - 1);
					if (CurrSSANum == IVFamily.DependentInductionVars[DIVIndex].DIV.GetSSANum()) {
						// Matched. Replace CurrExpr->LeftOperand with InitExpr or LimitExpr->GetRightTree()
						//  from the DIV matched.
						//  For LimitExpr, the LeftOperand is compared to the RightOperand or RightTree, so the right side is the limit.
						if (InitCase) {
							if (nullptr == IVFamily.DependentInductionVars[DIVIndex].DIVInitExpr) {
								assert(0 <= IVLoopIndex);
								success = false;
							}
							else {
								STARSExpression *ReplaceExpr = IVFamily.DependentInductionVars[DIVIndex].DIVInitExpr;

								// Compare exprs to avoid infinite recursion when the replacement
								//  already equals the current expr (operator< both ways == equality).
								if ((*ReplaceExpr < *CurrExpr) || (*CurrExpr < *ReplaceExpr)) {
									STARSExpression *FinalExpr = ReplaceExpr->Clone();
									CurrExpr->SetLeftTree(FinalExpr);
									CurrExpr->SetLeftOperand(nullptr);
									CurrExpr->SetParentInst(FinalExpr->GetParentInst());
									changed = true;
									// Recurse; could have DIV dependent on another DIV.
									success = this->ReplaceAllIVsWithExprs(LoopIndex, CurrExpr->GetLeftTree(), InitCase, changed); // recurse
								}
								else { // Exprs are equal
									success = true; // no more replacing can be done
								}
							}
						}
						else { // LimitExpr case
							if (nullptr == IVFamily.DependentInductionVars[DIVIndex].DIVLimitExpr) {
								assert(0 <= IVLoopIndex);
								success = false;
							}
							else {
								STARSExpression *ReplaceExpr = IVFamily.DependentInductionVars[DIVIndex].DIVLimitExpr;

								if ((*ReplaceExpr < *CurrExpr) || (*CurrExpr < *ReplaceExpr)) {
									STARSExpression *FinalExpr = ReplaceExpr->Clone();
									CurrExpr->SetLeftTree(FinalExpr);
									CurrExpr->SetLeftOperand(nullptr);
									CurrExpr->SetParentInst(FinalExpr->GetParentInst());
									changed = true;
									// Recurse; could have DIV dependent on another DIV.
									success = this->ReplaceAllIVsWithExprs(LoopIndex, CurrExpr->GetLeftTree(), InitCase, changed); // recurse
								}
								else { // Exprs are equal
									success = true; // no more replacing can be done
								}
							}
						} // end if (InitCase) ... else ...
					}
					else {
						// Alternative: Should we not insist on SSANum match? !!!!****!!!!
						success = false;
					}
				}
			}
		}
	}
	else {
		success = this->ReplaceAllDIVsWithExprs(LoopIndex, CurrExpr->GetLeftTree(), InitCase, changed); // recurse
	}
	if (!success)
		return success;

	// Right side: same logic as the left side above, operating on the right operand/subtree.
	if (!CurrExpr->HasRightSubTree()) {
		const STARSOpndTypePtr CurrOp = CurrExpr->GetConstRightOperand();
		if (MDIsDataFlowOpnd(CurrOp, UseFP)) {
			SMPInstr *UseInst = CurrExpr->GetParentInst();
			int CurrSSANum = CurrExpr->GetRightSSANum();
			int IVLoopIndex = -1;
			if (this->IsLoopNestInductionVar(CurrOp, UseInst, ListIter, FamilyIndex, IVLoopIndex)) {
				if (0 < FamilyIndex) { // DIV, not BIV
					struct InductionVarFamily IVFamily = (*ListIter);
					size_t DIVIndex = (FamilyIndex - 1);
					if (CurrSSANum == IVFamily.DependentInductionVars[DIVIndex].DIV.GetSSANum()) {
						// Matched. Replace CurrExpr->RightOperand with InitExpr or LimitExpr->GetRightTree() or LimitExpr->GetRightOperand().
						//  For LimitExpr, the LeftOperand is compared to the RightOperand or RightTree, so the right side is the limit.
						if (InitCase) {
							if (nullptr == IVFamily.DependentInductionVars[DIVIndex].DIVInitExpr) {
								assert(0 <= IVLoopIndex);
								success = false;
							}
							else {
								STARSExpression *ReplaceExpr = IVFamily.DependentInductionVars[DIVIndex].DIVInitExpr;

								if ((*ReplaceExpr < *CurrExpr) || (*CurrExpr < *ReplaceExpr)) {
									STARSExpression *FinalExpr = ReplaceExpr->Clone();
									CurrExpr->SetRightTree(FinalExpr);
									CurrExpr->SetRightOperand(nullptr);
									CurrExpr->SetParentInst(FinalExpr->GetParentInst());
									changed = true;
									// Recurse; could have DIV dependent on another DIV.
									success = this->ReplaceAllIVsWithExprs(LoopIndex, CurrExpr->GetRightTree(), InitCase, changed); // recurse
								}
								else { // Exprs are equal
									success = true; // no more replacing can be done
								}
							}
						}
						else if (nullptr == IVFamily.DependentInductionVars[DIVIndex].DIVLimitExpr) { // LimitExpr case, safeguard
							assert(0 <= IVLoopIndex);
							success = false;
						}
						else { // LimitExpr case, safe to proceed
							STARSExpression *ReplaceExpr = IVFamily.DependentInductionVars[DIVIndex].DIVLimitExpr;

							if ((*ReplaceExpr < *CurrExpr) || (*CurrExpr < *ReplaceExpr)) {
								STARSExpression *FinalExpr = ReplaceExpr->Clone();
								CurrExpr->SetRightTree(FinalExpr);
								CurrExpr->SetRightOperand(nullptr);
								CurrExpr->SetParentInst(FinalExpr->GetParentInst());
								changed = true;
								// Recurse; could have DIV dependent on another DIV.
								success = this->ReplaceAllIVsWithExprs(LoopIndex, CurrExpr->GetRightTree(), InitCase, changed); // recurse
							}
							else { // Exprs are equal
								success = true; // no more replacing can be done
							}
						} // end if (InitCase) ... else ...
					}
					else {
						// Alternative: Should we not insist on SSANum match? !!!!****!!!!
						success = false;
					}
				}
			}
		}
	}
	else {
		success = this->ReplaceAllDIVsWithExprs(LoopIndex, CurrExpr->GetRightTree(), InitCase, changed); // recurse
	}

	return success;
} // end of SMPFunction::ReplaceAllDIVsWithExprs()

// wrapper to call ReplaceAllBIVsWithExprs() and ReplaceAllDIVsWithExprs()
bool SMPFunction::ReplaceAllIVsWithExprs(std::size_t LoopIndex, STARSExpression *CurrExpr, bool InitCase, bool &changed) {
	// Basic induction vars are replaced first; only if that pass succeeds
	//  do we attempt the dependent induction vars.
	if (!this->ReplaceAllBIVsWithExprs(LoopIndex, CurrExpr, InitCase, changed))
		return false;
	return this->ReplaceAllDIVsWithExprs(LoopIndex, CurrExpr, InitCase, changed);
} // end of SMPFunction::ReplaceAllIVsWithExprs()

// expand, replace IVs, simplify, until no more can be done
//  Iteratively expands CurrExpr back toward incoming args: each pass calls
//  ExpandExpr(), replaces any induction vars it stopped on, then folds
//  constants and simplifies. Terminates when expansion no longer stops on
//  an IV (normal completion), nothing changed, or a step fails.
//  Returns true on overall success.
bool SMPFunction::ExpandExprToInArgs(std::size_t LoopIndex, STARSExpression *CurrExpr, bool InitCase, set<STARS_ea_t> &StackPtrCopySet) {
	bool success = true;
	bool changed = true;
	bool StoppedOnIV = true; // seed true so the loop runs at least once
#if 0
	STARS_ea_t OrigUseAddr = CurrExpr->GetOriginalParentInst()->GetAddr();
#else
	STARS_ea_t OrigUseAddr = CurrExpr->GetParentInst()->GetAddr();
#endif
	set<int> LoopRegHashes;
	uint32_t IterationCount = 0;
	while (success && changed && StoppedOnIV) {
		changed = false;
		int DepthCounter = 0;
		success = CurrExpr->ExpandExpr(OrigUseAddr, LoopIndex, false, true, false, false, InitCase, LoopRegHashes, StoppedOnIV, changed, StackPtrCopySet, DepthCounter);
		if (success && StoppedOnIV) {
			success = this->ReplaceAllIVsWithExprs(LoopIndex, CurrExpr, InitCase, changed);
			if (!success) {
				SMP_msg("ERROR: LOOP: ReplaceAllIVs failure in ExpandExprToInArgs, InitCase %d iteration %u for UseAddr %llx loop %zu\n",
					InitCase, IterationCount, (uint64_t)OrigUseAddr, LoopIndex);
			}
		}
		else if (!success) {
			// Fixed: only an actual ExpandExpr() failure is an error. The old
			//  unconditional else also logged this ERROR on the normal termination
			//  path (success with !StoppedOnIV, i.e. fully expanded to InArgs).
			SMP_msg("ERROR: LOOP: ExpandExpr failure in ExpandExprToInArgs, InitCase %d iteration %u for UseAddr %llx loop %zu\n",
				InitCase, IterationCount, (uint64_t) OrigUseAddr, LoopIndex);
		}
		if (success) {
			// Constant-fold and simplify; track whether simplification made progress.
			CurrExpr->EvaluateConsts();
			bool changed2 = CurrExpr->SimplifyDriver();
			changed = (changed || changed2);
		}
		++IterationCount;
	}

	return success;
} // end of SMPFunction::ExpandExprToInArgs()

// Fill in LoopMemRangeInArgRegsBitmap and MemRangeRegsBitmap and InArgsUsedInMemWrites.
//  Pass 1: for every loop's mem-write expr bounds pair, accumulate the InArg regs used
//   by both bounds into the per-loop and whole-function bitmaps.
//  Pass 2: for every loop-invariant (or non-loop) indirect, non-stack memory write with a
//   single address register, expand the address expr; if it reduces to a single InArg
//   operand, record it in InArgsUsedInMemWrites and map the address reg to the InArg reg.
void SMPFunction::DetectInArgRegsNeededForMemWriteExprs(void) {
	bool UseFP = this->UsesFramePointer();
	// NOTE(review): CurrArgRegsUsed is declared outside the loops and never cleared, so
	//  reg bits accumulate across exprs and across loops before being OR-ed into the
	//  per-loop bitmaps — confirm whether this cumulative behavior is intended.
	bitset<1 + MD_LAST_REG_NO> CurrArgRegsUsed;
	for (size_t LoopIndex = 0; LoopIndex < this->LoopCount; ++LoopIndex) {
		STARSExprBoundsIter ExprIter;
		for (ExprIter = this->GetFirstLoopMemWriteExprBoundsIter(LoopIndex); ExprIter != this->GetLastLoopMemWriteExprBoundsIter(LoopIndex); ++ExprIter) {
			STARSExpression *LowerExpr = ExprIter->first;
			STARSExpression *UpperExpr = ExprIter->second;
			if ((nullptr != LowerExpr) && (nullptr != UpperExpr)) {
				STARSExpression *CurrExpr = UpperExpr;
				if (nullptr != CurrExpr) {
					// Gather InArg regs from the upper bound, then the lower bound.
					CurrExpr->ListInArgRegsUsed(CurrArgRegsUsed);
					CurrExpr = LowerExpr;
					assert(nullptr != CurrExpr);
					CurrExpr->ListInArgRegsUsed(CurrArgRegsUsed);
					this->MemRangeRegsBitmap |= CurrArgRegsUsed;
					this->LoopMemRangeInArgRegsBitmap[LoopIndex] |= CurrArgRegsUsed;
				}
			}
		} // end for all loop mem write exprs in current loop
	}

	// Look at all memory writes and find loop-invariant non-stack indirect writes.
	//  See which ones can be traced back to InArgs.
	// Also look at calls, and see which callees have MemExprs that trace back to InArgs that we pass to the callee.
	//  See if any of those MemExprs can be traced back to our own InArgs.
	for (size_t BlockIndex = 0; BlockIndex < this->RPOBlocks.size(); ++BlockIndex) {
		SMPBasicBlock *CurrBlock = this->RPOBlocks[BlockIndex];
		// NOTE(review): the whole per-instruction scan below (including the looping
		//  string-op handling near the bottom) only runs for blocks that have an
		//  indirect mem write — confirm string ops cannot occur in other blocks.
		if (CurrBlock->HasIndirectMemWrite()) {
			// Ensure that mem write address regs are loop-invariant w.r.t. outermost loop containing the write
			int OuterLoopIndex = this->GetOutermostLoopNum((int) BlockIndex);
			// OuterLoopIndex could be -1 if CurrInst is not in a loop
			bool NotInLoop = (0 > OuterLoopIndex);
			int InnerLoopIndex = this->GetInnermostLoopNum((int) BlockIndex);
			// +1 because index 0 of the per-loop containers is reserved for "not in a loop".
			size_t InnerLoopNumPlusOne = (size_t)(InnerLoopIndex + 1);
			list<size_t> LoopList; // fill on demand
			for (vector<SMPInstr *>::iterator InstIter = CurrBlock->GetFirstInst(); InstIter != CurrBlock->GetLastInst(); ++InstIter) {
				SMPInstr *CurrInst = (*InstIter);
				// NOTE: Handle looping string opcodes later.
				if (CurrInst->HasIndirectMemoryWrite() && (!CurrInst->MDIsPrefetchOpcode()) 
					&& (!(CurrInst->MDIsPossibleStringLoopingOpcode() && CurrInst->MDHasAnyRepeatPrefix()))) {
					STARSOpndTypePtr DefOp = CurrInst->GetMemDef();
					assert(DefOp->IsMemOp());
					if (!MDIsDirectStackAccessOpnd(DefOp, UseFP)) {
						STARSDefUseSet AddressRegs;
						bool success = CurrInst->GetAddressRegs(AddressRegs);
						if (success && (1 == AddressRegs.size())) { // limit to one address reg for now
							STARS_ea_t MemWriteAddr = CurrInst->GetAddr();
#if 0						// Get rid of CurrInst->CreateMemDefAddrExpr()
							STARSExpression *DefExpr = CurrInst->CreateMemDefAddrExpr();
#else
							STARSExpression *DefExpr = this->CreateMemoryAddressExpr(DefOp, CurrInst);
#endif
							if (nullptr != DefExpr) {
								bool LoopInvariantDef = ((!NotInLoop) && this->IsUseLoopInvariantDEF(OuterLoopIndex, DefOp, CurrInst));
								bool StoppedOnIV = false;
								size_t DefWidth = (size_t) DefOp->GetByteWidth();
								if (NotInLoop || LoopInvariantDef) {
									set<int> DummyLoopRegHashes;
									set<STARS_ea_t> StackPtrCopySet;
									bool changed = false;
									int DepthCounter = 0;
									if (DefExpr->ExpandExpr(MemWriteAddr, OuterLoopIndex, false, true, false, false, false, DummyLoopRegHashes, StoppedOnIV, changed, StackPtrCopySet, DepthCounter)) {
										if (!StoppedOnIV) {
											// We want a single node, an incoming arg.
											// SSANum 0 indicates the incoming (pre-function-entry) value.
											if (!DefExpr->HasLeftSubTree() && (!DefExpr->HasRightSubTree()) && (0 == DefExpr->GetLeftSSANum())) {
												STARSOpndTypePtr LeftOp = DefExpr->GetLeftOperand();
												if (MDIsDataFlowOpnd(LeftOp, UseFP)) {
													// Record in InArgsUsedInMemWrites
													pair<STARSExprSetIter, bool> InsertResult = this->InArgsUsedInMemWrites[InnerLoopNumPlusOne].insert(DefExpr);
													STARSOpndTypePtr AddrRegOp = AddressRegs.cbegin()->GetOp();
													if (InsertResult.second) {
														pair<STARSExprSetIter, size_t> InsertValue(InsertResult.first, DefWidth);
														this->InArgsUsedInMemWriteByteWidths[InnerLoopNumPlusOne].push_back(InsertValue);
													}
													// Even if this is a duplicate expr, make sure we map the InArg reg to the LeftOp reg.
													//  e.g. [rbx+16] and [rbp+8] could both expand to the same DefExpr [RDI+48]. Both RBX
													//  and RBP need to be mapped to RDI for assertions at the MemWriteAddr.
													// Save mapping of mem address reg operand to InArg operand
													if (NotInLoop) { // loop examples need to pass saved InArg arguments
														STARSInArgMapping InsertVal(MemWriteAddr, STARSOpndPair(AddrRegOp, LeftOp));
														pair<STARSInArgMap::iterator, bool> InsertResult = this->MemWriteToInArgMap.insert(InsertVal);
														assert(InsertResult.second);
													}
													else if (AddrRegOp->IsRegOp()) {
														// Update InArgs needed by loop
														if (LoopList.empty()) {
															this->BuildLoopList((int) BlockIndex, LoopList);
														}
														STARS_regnum_t InArgRegNum = AddrRegOp->GetReg();
														assert(!LoopList.empty());
														// Mark the InArg reg as needed by every loop containing this block.
														for (list<size_t>::const_iterator LoopIter = LoopList.cbegin(); LoopIter != LoopList.cend(); ++LoopIter) {
															size_t CurrLoopNum = (*LoopIter);
															this->LoopMemRangeInArgRegsBitmap[CurrLoopNum].set(InArgRegNum);
														}
													}
												}
											}
										}
										else { // StoppedOnIV
											// Expansion hit an induction var: flag the loop as a symbolic-analysis
											//  problem and record the expr for later IV-based handling.
											this->SymbolicAnalysisProblems[InnerLoopNumPlusOne] = true;
											SMP_msg("ERROR: SPARK: Expr Expand() StoppedOnIV in DetectInArgRegsNeededForMemWriteExprs() at %llx\n",
												(uint64_t) MemWriteAddr);
											pair<STARSExprSetIter, bool> InsertResult = this->StoppedOnIVNonRangeExprs[InnerLoopNumPlusOne].insert(DefExpr);
											if (InsertResult.second) { // new Expr
												pair<STARSExprSetIter, size_t> InsertValue(InsertResult.first, DefWidth);
												this->StoppedOnIVNonRangeIterWidths[InnerLoopNumPlusOne].push_back(InsertValue);
											}
										}
									}
									else {
										this->SymbolicAnalysisProblems[InnerLoopNumPlusOne] = true;
										SMP_msg("ERROR: ExpandExpr() failure in DetectInArgRegsNeededForMemWriteExprs() at %llx\n",
											(uint64_t) MemWriteAddr);
									}
								}
							}
							else {
								SMP_msg("ERROR: CreateMemDefAddrExpr() failure in DetectInArgRegsNeededForMemWriteExprs() at %llx\n",
									(uint64_t) MemWriteAddr);
							}
						}
					} // end if not direct stack access operand
				}
				// Build looping string mem writing exprs
				if (CurrInst->MDHasAnyRepeatPrefix() && CurrInst->MDIsPossibleStringLoopingOpcode()) {
					this->BuildLoopingStringMemExprs(CurrBlock, CurrInst);
				}
			} // end for all insts in CurrBlock
		}
	} // end for all blocks in func

	return;
} // end of SMPFunction::DetectInArgRegsNeededForMemWriteExprs()

// take LoopRegHashSets and expand into LoopMemAddrRegSourceExprs.
//  For each hashed reg DEF recorded per loop: locate its global def address and
//  build a source expression for the reg value. SSA-marker defs (incoming values)
//  get a fresh SMP_ASSIGN expr; regular defs get an expr built from the DefRT and
//  fully expanded/simplified. Successful exprs are inserted into
//  LoopMemAddrRegSourceExprs and paired with the reg number in LoopRegSourceExprPairs;
//  any failure marks SymbolicAnalysisProblems for the loop (index LoopNum + 1).
void SMPFunction::ExpandLoopRegHashExprs(void) {
	for (size_t LoopNum = 0; LoopNum < this->GetNumLoops(); ++LoopNum) {
		if (!this->LoopRegHashSets[LoopNum].empty()) {
			// For each reg hash, find the DefExpr, expand it, record in LoopMemAddrRegSourceExprs.
			for (set<int>::const_iterator HashIter = this->LoopRegHashSets[LoopNum].cbegin(); HashIter != this->LoopRegHashSets[LoopNum].cend(); ++HashIter) {
				int DefHashValue = (*HashIter);
				STARS_regnum_t DefRegNo = (STARS_regnum_t) ExtractRegFromHash(DefHashValue);
				STARS_ea_t DefAddr = this->GetGlobalDefAddrForRegHash(DefHashValue);
				if ((STARS_BADADDR == DefAddr) || STARS_IsBlockNumPseudoID(DefAddr) || STARS_IsLiveInPseudoID(DefAddr)) {
					// No usable def site (bad addr or a pseudo-ID we cannot expand from).
					SMP_msg("ERROR: SPARK: ExpandLoopRegHashExprs() found DefAddr %llx for DefHashValue %llx\n", (uint64_t) DefAddr, (uint64_t) DefHashValue);
					this->SymbolicAnalysisProblems[LoopNum + 1] = true;
				}
				else if (STARS_IsSSAMarkerPseudoID(DefAddr)) {
					// No DefInst expr to create; just an incoming reg value.
					// Create a DefExpr from scratch.
					SMPInstr *DefInst = this->GetInstFromAddr(DefAddr);
					assert(nullptr != DefInst);
					STARSOpndTypePtr DefOp = DefInst->MakeRegOpnd(DefRegNo);
					STARSExpression *DefExpr = new STARSExpression();
					DefExpr->SetParentFunc(this);
					DefExpr->SetParentInst(DefInst);
					DefExpr->SetOriginalParentInst(DefInst);
					DefExpr->SetLeftOperand(DefOp);
					DefExpr->SetLeftUseAddr(DefAddr);
					DefExpr->SetOperator(SMP_ASSIGN);
					DefExpr->SetLeftPreLoopDefAddr(DefAddr);
					// SSANum 0: the incoming (function-entry) value of the reg.
					DefExpr->SetLeftSSANum(0);
					// NOTE(review): if the insert finds a duplicate, the newly allocated
					//  DefExpr is not freed here — confirm expr ownership/cleanup policy.
					pair<STARSExprSetIter, bool> InsertResult = LoopMemAddrRegSourceExprs.insert(DefExpr);
					if (InsertResult.second) { // new expr
						pair<STARS_regnum_t, STARSExprSetIter> InsertValue(DefRegNo, InsertResult.first);
						this->LoopRegSourceExprPairs[LoopNum].push_back(InsertValue);
						SMP_msg("INFO: SPARK: ExpandLoopRegHashExprs() succeeded at DefAddr %llx for DefHashValue %llx\n", (uint64_t)DefAddr, (uint64_t)DefHashValue);
					}
				}
				else { // found a regular instruction at DefAddr.
					SMPInstr *DefInst = this->GetInstFromAddr(DefAddr);
					assert(nullptr != DefInst);
					STARSOpndTypePtr DefOp = DefInst->MakeRegOpnd(DefRegNo);
					SMPRegTransfer *DefRT = DefInst->GetDefRT(DefOp);
					if (nullptr == DefRT) {
						SMP_msg("ERROR: SPARK: ExpandLoopRegHashExprs() did not find DefRT at DefAddr %llx for DefHashValue %llx\n", (uint64_t) DefAddr, (uint64_t) DefHashValue);
						this->SymbolicAnalysisProblems[LoopNum + 1] = true;
					}
					else {
						// Create an expr from the DefRT and expand it.
						STARSExpression *DefExpr = new STARSExpression(DefRT);
						set<int> DummyLoopRegHashes;
						bool StoppedOnIV = false;
						bool changed = false;
						set<STARS_ea_t> StackPtrCopySet;
						int DepthCounter = 0;
						if (DefExpr->ExpandExpr(DefAddr, LoopNum, false, true, false, false, false, DummyLoopRegHashes, StoppedOnIV, changed, StackPtrCopySet, DepthCounter)) {
							if (!StoppedOnIV) {
								// Fold constants and simplify before recording.
								DefExpr->EvaluateConsts();
								(void) DefExpr->SimplifyDriver();
								pair<STARSExprSetIter, bool> InsertResult = LoopMemAddrRegSourceExprs.insert(DefExpr);
								if (InsertResult.second) { // new expr
									pair<STARS_regnum_t, STARSExprSetIter> InsertValue(DefRegNo, InsertResult.first);
									this->LoopRegSourceExprPairs[LoopNum].push_back(InsertValue);
									SMP_msg("INFO: SPARK: ExpandLoopRegHashExprs() succeeded at DefAddr %llx for DefHashValue %llx\n", (uint64_t) DefAddr, (uint64_t) DefHashValue);
								}
							}
							else {
								// Expansion hit an induction var; flag the loop and give up on this reg.
								SMP_msg("ERROR: SPARK: ExpandLoopRegHashExprs() StoppedOnIV for DefAddr %llx for DefHashValue %llx\n", (uint64_t)DefAddr, (uint64_t)DefHashValue);
								this->SymbolicAnalysisProblems[LoopNum + 1] = true;
							}
						}
						else {
							SMP_msg("ERROR: SPARK: ExpandLoopRegHashExprs() failed ExpandExpr for DefAddr %llx for DefHashValue %llx\n", (uint64_t) DefAddr, (uint64_t) DefHashValue);
							this->SymbolicAnalysisProblems[LoopNum + 1] = true;
						}
					}
				}
			} // end for all HashIters in current set
		} // end if current hashed reg set is not empty
	} // end for all LoopNum values
	return;
} // end of SMPFunction::ExpandLoopRegHashExprs()

// Analyze switch starting at indir jump at end of CurrBlock, or go past default jumps to indir jump; return false if not well-structured
// Analyze switch starting at indir jump at end of CurrBlock, or go past default jumps to indir jump; return false if not well-structured
bool SMPFunction::AnalyzeSwitchStatement(SMPBasicBlock *CurrBlock) {
	bool StructuredSwitch = true;

	// LastInst/LastAddr refer to the INDIR_JUMP (switch table jump) instruction throughout.
	SMPInstr *LastInst = (*(CurrBlock->GetRevInstBegin()));
	STARS_ea_t LastAddr = LastInst->GetAddr();
	if (INDIR_JUMP != LastInst->GetDataFlowType()) {
		StructuredSwitch = false;
		SMP_msg("WARNING: Unstructured switch at %llx due to lack of INDIR_JUMP.\n",
			(uint64_t) LastAddr);
	}
	else { // populate the switch data structures and mark control flow types for switch-related jumps
		size_t SwitchIndex = this->SwitchJumpMap.size(); // first index is zero, etc.
		assert(this->SwitchInfoArray.size() == this->SwitchJumpMap.size());
		struct SwitchTableInfo TableInfo;
		bool success = LastInst->AnalyzeSwitchInfo(TableInfo);
		if (!success) {
			StructuredSwitch = false;
			SMP_msg("WARNING: Unstructured switch at %llx due to SMPInstr::AnalyzeSwitchInfo failure.\n",
				(unsigned long long) LastAddr);
		}
		else {
			// Mark the jumps to the default case and find the dominator node.
			this->FindSwitchIDom(TableInfo);

			// Find consistent follow block for all cases.
			if (this->FindSwitchStatementFollowBlock(TableInfo)) {

				// Map current INDIR_JUMP instruction address to the SwitchIndex,
				pair<STARS_ea_t, size_t> JumpMapItem(LastInst->GetAddr(), SwitchIndex);
				pair<map<STARS_ea_t, size_t>::iterator, bool> InsertResult = this->SwitchJumpMap.insert(JumpMapItem);
				assert(InsertResult.second);

				// Add TableInfo to the SwitchInfoArray.
				this->SwitchInfoArray.push_back(TableInfo);
				assert(this->SwitchInfoArray.size() == this->SwitchJumpMap.size());  // still in sync

				// Mark the jumps from the end of each case to the follow node.
				assert(TableInfo.FollowNodeNum >= 0);
				SMPBasicBlock *FollowBlock = this->GetBlockByNum((size_t) TableInfo.FollowNodeNum);
				// Every predecessor of FollowBlock that ends with a jump should have the jump marked
				//  as a CASE_BREAK_TO_FOLLOW_NODE. This will catch the default case as well.
				// NOTE: These loop locals were renamed (formerly LastInst/LastAddr) so they no
				//  longer shadow the INDIR_JUMP's LastInst/LastAddr declared at function scope.
				for (list<SMPBasicBlock *>::iterator PredIter = FollowBlock->GetFirstPred(); PredIter != FollowBlock->GetLastPred(); ++PredIter) {
					SMPBasicBlock *PredBlock = (*PredIter);
					vector<SMPInstr *>::iterator LastInstIter = --(PredBlock->GetLastInst());
					SMPInstr *CaseLastInst = (*LastInstIter);
					STARS_ea_t CaseLastAddr = CaseLastInst->GetAddr();
					SMPitype FlowType = CaseLastInst->GetDataFlowType();
					if (JUMP == FlowType) {
						this->SetControlFlowType(CaseLastAddr, CASE_BREAK_TO_FOLLOW_NODE);
					}
					else if (DEFAULT == FlowType) { // fall-through to follow node could be optimization for final case
						;
					}
					else { // something odd here
						SMP_msg("ERROR: Inst at %llx should jump or fall through to switch follow node.\n",
							(unsigned long long) CaseLastAddr);
					}
				}
				// Mark jumps around the default case to the INDIR_JUMP.
				if (TableInfo.DefaultJumpAddr != STARS_BADADDR) { // we have a default case
					for (list<SMPBasicBlock *>::iterator PredIter = CurrBlock->GetFirstPred(); PredIter != CurrBlock->GetLastPred(); ++PredIter) {
						SMPBasicBlock *PredBlock = (*PredIter);
						SMPInstr *LastPredInst = (*(PredBlock->GetRevInstBegin()));
						STARS_ea_t LastPredAddr = LastPredInst->GetAddr();
						SMPitype FlowType = LastPredInst->GetDataFlowType();
						ControlFlowType LastPredCFType = this->GetControlFlowType(LastPredAddr);
						if ((LastPredCFType == FALL_THROUGH) && ((FlowType == JUMP) || (FlowType == COND_BRANCH))) {
							// FALL_THROUGH means the type has not been marked yet, e.g. not a LOOP_EXIT.
							// if-then-else marking has not happened yet (ordering is important).
							// Don't want to mark simple if-else jumps, so see if we are jumping around default case blocks
							if (this->DoesBlockDominateBlock(TableInfo.IDomBlockNum, PredBlock->GetNumber())) {
								// BUG FIX: mark the predecessor's jump at LastPredAddr, not LastAddr.
								//  LastAddr is the INDIR_JUMP instruction itself; the old code marked
								//  the wrong instruction (an apparent variable-shadowing accident).
								this->SetControlFlowType(LastPredAddr, JUMP_TO_SWITCH_INDIR_JUMP);
								if (FlowType == COND_BRANCH) {
									// Poorly structured. We could have if (cond) switch {...} but then the if-block
									//  would not be dominated by the INDIR_JUMP block, which is TableInfo.IDomBlockNum for
									//  the no-default-case switches.
									StructuredSwitch = false;
									break;
								}
							}
						}
					}
				}
			}
			else {
				StructuredSwitch = false;
				SMP_msg("WARNING: Unstructured switch at %llx due to SMPFunction::FindSwitchStatementFollowBLock failure.\n",
					(unsigned long long) LastAddr);
			}
		}
	}

	if (StructuredSwitch) {
		SMP_msg("INFO: Structured switch at %llx\n", (unsigned long long) LastInst->GetAddr());
	}
	else {
		SMP_msg("INFO: Unstructured switch at %llx\n", (unsigned long long) LastInst->GetAddr());
	}

	return StructuredSwitch;
} // end of SMPFunction::AnalyzeSwitchStatement()

// Mark jumps to default case, find IDom of entire switch statement
void SMPFunction::FindSwitchIDom(struct SwitchTableInfo &TableInfo) {
	// The TableInfo.IndirJumpBlockNum gives the IDom of all non-default cases.
	//  We need to find the IDom of all blocks that jump to the default case, if any, and
	//  then the IDom of {that IDom and the indir jump block}.
	if (TableInfo.DefaultJumpAddr == STARS_BADADDR) {
		// Simplest case: No default cases, no blocks jump to a default case, so INDIR_JUMP block is IDom
		//  for the whole switch statement.
		TableInfo.IDomBlockNum = TableInfo.IndirJumpBlockNum;
	}
	else {
		// Find the blocks that jump to the default block. Mark the jumps while computing the IDom over all of them.
		SMPBasicBlock *DefaultCaseBlock = this->GetBlockByNum((size_t) TableInfo.DefaultCaseBlockNum);
		assert(NULL != DefaultCaseBlock);
		list<SMPBasicBlock *>::iterator PredIter = DefaultCaseBlock->GetFirstPred();
		assert(PredIter != DefaultCaseBlock->GetLastPred());
		int IDom = (int) this->RPOBlocks.size(); // Find lowest RPO number, init to max
		while (PredIter != DefaultCaseBlock->GetLastPred()) {
			SMPBasicBlock *PredBlock = (*PredIter);
			SMPInstr *LastInst = (*(--PredBlock->GetLastInst()));
			assert(NULL != LastInst);
			SMPitype FlowType = LastInst->GetDataFlowType();
			if ((COND_BRANCH == FlowType) || (JUMP == FlowType)) {
				this->SetControlFlowType(LastInst->GetAddr(), JUMP_TO_DEFAULT_CASE);
			}
			else if ((INDIR_JUMP == FlowType) && (PredBlock->GetNumber() == TableInfo.IndirJumpBlockNum)) {
				// skip over switch table jumps to the default case
				++PredIter;
				continue;
			}
			int CurrNum = PredBlock->GetNumber();
			assert(SMP_BLOCKNUM_UNINIT != CurrNum);
			assert(CurrNum < (int)this->IDom.size());
			int CurrIDom = this->IDom[CurrNum];
			IDom = (IDom > CurrIDom) ? CurrIDom : IDom; // keep running minimum RPO IDom number
			++PredIter;
		}

		// Blocks that jump to the default case should precede and dominate the INDIR_JUMP block, but
		//  we will ensure safety here.
		IDom = (IDom > TableInfo.IndirJumpBlockNum) ? TableInfo.IndirJumpBlockNum : IDom; // keep running minimum RPO IDom number
		assert(SMP_BLOCKNUM_UNINIT != IDom);
		TableInfo.IDomBlockNum = IDom;
	}
	return;
} // end of SMPFunction::FindSwitchIDom()

// build skeleton CFG, then use in coalescing nodes to analyze expressions
void SMPFunction::BuildShadowCFG(void) {
	// First, just get block numbers into the vector entries.
	for (size_t BlockIndex = 0; BlockIndex < this->RPOBlocks.size(); ++BlockIndex) {
		STARSCFGBlock *ShadowBlock = new STARSCFGBlock(this, (int) BlockIndex);
		this->ShadowCFGBlocks.push_back(ShadowBlock);
		// Keep track of fall-through and non-fall-through blocks inside ShadowCFG.
		SMPBasicBlock *CurrBlock = this->RPOBlocks[BlockIndex];
		list<SMPBasicBlock *>::const_iterator SuccIter = CurrBlock->GetFallThroughSucc();
		if (SuccIter != CurrBlock->GetLastConstSucc()) {
			ShadowBlock->GetExpr()->SetFTBlockNum((*SuccIter)->GetNumber());
		}
		SuccIter = CurrBlock->GetCondNonFallThroughSucc();
		if (SuccIter != CurrBlock->GetLastConstSucc()) {
			ShadowBlock->GetExpr()->SetNFTBlockNum((*SuccIter)->GetNumber());
		}
	}

	
	return;
} // SMPFunction::BuildShadowCFG()

// Make LeftExpr be (LeftExpr TopOper RightExpr) or ((not LeftExpr) TopOper RightExpr
void SMPFunction::CoalesceShadowBlocks(int LeftBlockNum, int RightBlockNum, bool NegateLeft, SMPoperator TopOper, int NewFTNum, int NewNFTNum) {
	assert((0 <= LeftBlockNum) && (LeftBlockNum < (int)this->ShadowCFGBlocks.size()));
	assert((0 <= RightBlockNum) && (RightBlockNum < (int)this->ShadowCFGBlocks.size()));
	bool LoopExitCase = !(this->AreBlocksInSameLoops(NewFTNum, NewNFTNum));
	STARSCFGBlock *LeftCFGBlock = this->ShadowCFGBlocks[LeftBlockNum];
	STARSCFGBlock *RightCFGBlock = this->ShadowCFGBlocks[RightBlockNum];

	assert(!LeftCFGBlock->IsCoalesced());
	assert(!RightCFGBlock->IsCoalesced());

	STARSCondExpr *NewTopExpr = new STARSCondExpr();
	NewTopExpr->SetOperator(TopOper);
	NewTopExpr->SetCFGBlock(LeftCFGBlock);
	// If either LeftExpr or RightExpr is just a block, we want to set that block
	//  as the new LeftExpr or RightExpr and not copy the whole expr with its SMP_NULL_OPERATOR.
	if (SMP_NULL_OPERATOR == LeftCFGBlock->GetExpr()->GetOperator()) {
		NewTopExpr->SetLeftExprBlockNum(LeftBlockNum, NegateLeft);
	}
	else {
		NewTopExpr->SetLeftExpr(LeftCFGBlock->GetExpr(), NegateLeft);
	}
	if (SMP_NULL_OPERATOR == RightCFGBlock->GetExpr()->GetOperator()) {
		NewTopExpr->SetRightExprBlockNum(RightBlockNum);
	}
	else {
		NewTopExpr->SetRightExpr(RightCFGBlock->GetExpr());
	}
	// Set the then-block (NFT) and else-block (FT)
	NewTopExpr->SetNFTBlockNum(NewNFTNum);
	NewTopExpr->SetFTBlockNum(NewFTNum);

	// Set the NewTopExpr as the expr for LeftBlockNum.
	LeftCFGBlock->SetExpr(NewTopExpr);

	// Mark RightBlockNum as coalesced.
	RightCFGBlock->SetCoalesced();

	// Mark the conditional branches at the end of LeftBlockNum and RightBlockNum as short circuit branches.
	SMPBasicBlock *LeftBlock = this->RPOBlocks[LeftBlockNum];
	SMPBasicBlock *RightBlock = this->RPOBlocks[RightBlockNum];
	STARS_ea_t LeftLastAddr = LeftBlock->GetLastAddr();
	STARS_ea_t RightLastAddr = RightBlock->GetLastAddr();
	if (LoopExitCase) {
		this->SetControlFlowType(LeftLastAddr, SHORT_CIRCUIT_LOOP_EXIT);
		this->SetControlFlowType(RightLastAddr, SHORT_CIRCUIT_LOOP_EXIT);
	}
	else {
		this->SetControlFlowType(LeftLastAddr, SHORT_CIRCUIT_BRANCH);
		this->SetControlFlowType(RightLastAddr, SHORT_CIRCUIT_BRANCH);
	}

	return;
} // end of SMPFunction::CoalesceShadowBlocks()

// Find a consistent follow block for all switch statement cases and record it in TableInfo.FollowNodeNum
// return false if no consistent follow block
bool SMPFunction::FindSwitchStatementFollowBlock(struct SwitchTableInfo &TableInfo) {
	bool ConsistentFollowBlock = true;
	size_t CaseCount = TableInfo.CaseBlockNums.size(); // number of incoming edges we expect to see to the follow block
	int DefaultBlockNum = SMP_BLOCKNUM_UNINIT;
	if (STARS_BADADDR != TableInfo.DefaultJumpAddr) {
		++CaseCount;
		// See if jump table entries also go to the default address, and decrement CaseCount
		//  every time we see such an entry.
#if 0   // indirection, or not? DefaultJumpAddr seems to be the first inst in the default case.
		SMPInstr *DefJumpInst = this->GetInstFromAddr(TableInfo.DefaultJumpAddr);
		assert(NULL != DefJumpInst);
		STARS_ea_t DefaultCaseAddr = DefJumpInst->GetJumpTarget();
#else
		STARS_ea_t DefaultCaseAddr = TableInfo.DefaultJumpAddr;
#endif
		assert(STARS_BADADDR != DefaultCaseAddr);
		DefaultBlockNum = TableInfo.DefaultCaseBlockNum;
		assert(SMP_BLOCKNUM_UNINIT != DefaultBlockNum);
		SMPBasicBlock *DefaultBlock = this->GetBlockByNum(DefaultBlockNum);
		assert(NULL != DefaultBlock);
		DefaultBlock->SetSwitchDefaultCase();
		for (size_t CaseIndex = 0; CaseIndex < TableInfo.CaseBlockNums.size(); ++CaseIndex) {
			if (TableInfo.CaseBlockNums[CaseIndex] == DefaultBlockNum) {
				--CaseCount; // Only one edge from default case to follow block can be expected, no matter how many
								// incoming edges to the default block there are
			}
		}
	}

	// Find the immediate dominator for the switch statement.

	// Now, we define a well-structured switch statement to have a follow block with CaseCount incoming edges, reachable
	//  from all cases. Note that if we stopped after finding a reachable block with CaseCount incoming edges without
	//  checking that all cases can reach it, we might be at a follow block for a nested switch statement inside our current
	//  switch statement.
	this->ResetProcessedBlocks();
	int FollowBlockNum = SMP_BLOCKNUM_UNINIT;
	for (size_t CaseIndex = 0; ConsistentFollowBlock && (CaseIndex < TableInfo.CaseBlockNums.size()); ++CaseIndex) {
		int CurrFollowBlockNum = this->FindCaseFollowBlock(TableInfo.CaseBlockNums[CaseIndex], TableInfo.IDomBlockNum, CaseCount);
		if (CurrFollowBlockNum == SMP_BLOCKNUM_UNINIT) { // failure
			ConsistentFollowBlock = false;
			SMP_msg("WARNING: Unstructured switch at block %d due to SMPFunction::FindCaseFollowBlock failure at block %d.\n",
				TableInfo.IndirJumpBlockNum, TableInfo.CaseBlockNums[CaseIndex]);
		}
		else if (-2 == CurrFollowBlockNum) {
			// should not happen on first case
			assert(0 != CaseIndex);
		}
		else { // found follow block num
			if (0 == CaseIndex) { // first iteration
				FollowBlockNum = CurrFollowBlockNum;
			}
			else { // check for consistency
				ConsistentFollowBlock = (FollowBlockNum == CurrFollowBlockNum);
				if (!ConsistentFollowBlock)
					SMP_msg("WARNING: Unstructured switch at block %d due to SMPFunction::FindCaseFollowBlock inconsistency at block %d.\n",
						TableInfo.IndirJumpBlockNum, TableInfo.CaseBlockNums[CaseIndex]);
			}
		}
	} // end for all cases except default
	if (ConsistentFollowBlock && (DefaultBlockNum != SMP_BLOCKNUM_UNINIT)) {
		// Check default block follow node consistency
		int CurrFollowBlockNum = this->FindCaseFollowBlock(DefaultBlockNum, TableInfo.IDomBlockNum, CaseCount);
		ConsistentFollowBlock = ((-2 == CurrFollowBlockNum) || (CurrFollowBlockNum == FollowBlockNum));
		if (!ConsistentFollowBlock)
			SMP_msg("WARNING: Unstructured switch at block %d due to SMPFunction::FindCaseFollowBlock inconsistency at default block %d.\n",
				TableInfo.IndirJumpBlockNum, TableInfo.DefaultCaseBlockNum);
	}

	if (ConsistentFollowBlock) {
		TableInfo.FollowNodeNum = FollowBlockNum;
	}

	return ConsistentFollowBlock;
} // end of SMPFunction::FindSwitchStatementFollowBlock()

// Start at CaseBlockNum, return followblocknum with IncomingEdgeCount 
// Start at CaseBlockNum, return followblocknum with IncomingEdgeCount 
// Return codes: a block number >= 0 on success; SMP_BLOCKNUM_UNINIT (-1) on failure
//  (the search escaped the region dominated by HeaderBlockNum, or successors disagreed);
//  -2 when only already-visited blocks were reached (caller treats that as "no opinion").
int SMPFunction::FindCaseFollowBlock(int CaseBlockNum, int HeaderBlockNum, std::size_t IncomingEdgeCount) const {
	int FollowBlockNum = SMP_BLOCKNUM_UNINIT;
	// We define a well-structured switch statement such that it is forbidden to have one case fall through to another.
	//  Our algorithm would need to be modified to handle fall-throughs, which would require special translation to SPARK Ada
	//  as such case fall-throughs are not permitted in SPARK Ada.

	// Algorithm: Traverse CFG from CaseBlockNum until we find a block with IncomingEdgeCount that is dominated by HeaderBlockNum.
	//  If we encounter an already processed block, stop and return -2 as the block number to signal this condition.
	//  If one of our successors finds a follow block and the others all return -2, return the follow block number.
	//  If we reach a block not dominated by HeaderBlockNum before finding a follow block or a visited block, return -1.
	SMPBasicBlock *CaseBlock = this->GetBlockByNum(CaseBlockNum);
	assert(NULL != CaseBlock);
	// Mark visited BEFORE recursing so cycles in the CFG terminate via the -2 code.
	CaseBlock->SetProcessed(true); // visited
	for (list<SMPBasicBlock *>::iterator SuccIter = CaseBlock->GetFirstSucc(); SuccIter != CaseBlock->GetLastSucc(); ++SuccIter) {
		SMPBasicBlock *SuccBlock = (*SuccIter);
		int SuccBlockNum = SuccBlock->GetNumber();
		// NOTE: We have to screen out nested switch statements' follow blocks.
		STARS_ea_t LastBlockAddr = SuccBlock->GetLastAddr();
		ControlFlowType SuccBranchType = this->GetControlFlowType(LastBlockAddr);
		bool NestedSwitch = (SuccBranchType == CASE_BREAK_TO_FOLLOW_NODE); // avoid being deceived by nested follow nodes
		// Possibilities: already visited; not dominated by HeaderBlockNum; has IncomingEdgeCount; or needs recursion
		if (this->DoesBlockDominateBlock(HeaderBlockNum, SuccBlockNum)) {
			if (SuccBlock->IsProcessed()) {
				// Already-visited successor: record -2 only if nothing better known yet.
				if (FollowBlockNum == SMP_BLOCKNUM_UNINIT)
					FollowBlockNum = -2;  // record first already-visited block
			}
			else if (!NestedSwitch && (SuccBlock->GetNumPreds() == IncomingEdgeCount)) {
				// follow block found.
				SuccBlock->SetProcessed(true);
				FollowBlockNum = SuccBlockNum;
			}
			else { // need to recurse into successor block
				int SuccFollowBlockNum = this->FindCaseFollowBlock(SuccBlockNum, HeaderBlockNum, IncomingEdgeCount);
				// Meet function for existing FollowBlockNum and SuccFollowBlockNum:
				//  -1 (failure) dominates everything; -2 (visited-only) is overridden by any
				//  real block number; two different real block numbers are a failure.
				if ((FollowBlockNum == SMP_BLOCKNUM_UNINIT) || (FollowBlockNum == -2)) {
					FollowBlockNum = SuccFollowBlockNum;
					if (FollowBlockNum == SMP_BLOCKNUM_UNINIT) { // recursion returned problem code
						break; // failure
					}
				}
				else { // already had good follow block number; just looking for consistency
					if (SuccFollowBlockNum == SMP_BLOCKNUM_UNINIT) { // failure
						FollowBlockNum = SMP_BLOCKNUM_UNINIT;
						break;
					}
					else if (SuccFollowBlockNum >= 0) { // not the -2 visited code; found follow block
						if (SuccFollowBlockNum != FollowBlockNum) { // inconsistent
							FollowBlockNum = SMP_BLOCKNUM_UNINIT;
							break;
						}
					}
				}
			}
		}
		else { // cannot be structured switch statement; wandered out of switch without finding good follow block.
			FollowBlockNum = SMP_BLOCKNUM_UNINIT;
			break;
		}
	} // end for all successors

	return FollowBlockNum;
} // end of SMPFunction::FindCaseFollowBlock()

// Search SwitchInfoArray for DefaultCaseAddr, return index
// Search SwitchInfoArray for DefaultCaseAddr, return index
size_t SMPFunction::FindSwitchIndexForDefaultCaseAddr(STARS_ea_t DefaultCaseAddr) const {
	assert(STARS_BADADDR != DefaultCaseAddr);
	assert(!this->SwitchInfoArray.empty());
	size_t TableSize = this->SwitchInfoArray.size();
	size_t FoundSwitchIndex = TableSize + 1; // sentinel: not yet found

	// Linear scan for the switch whose default case begins at DefaultCaseAddr.
	size_t SwitchIndex = 0;
	while (SwitchIndex < TableSize) {
		if (DefaultCaseAddr == this->SwitchInfoArray[SwitchIndex].DefaultJumpAddr) {
			FoundSwitchIndex = SwitchIndex;
			break;
		}
		++SwitchIndex;
	}

	// Caller guarantees the address belongs to some recorded switch; assert on miss.
	assert(FoundSwitchIndex < TableSize);
	return FoundSwitchIndex;
} // end of SMPFunction::FindSwitchIndexForDefaultCaseAddr()

// Analyze if-statements with short-circuit operators
// Analyze if-statements with short-circuit operators
// Returns true if all short-circuit conditionals could be structured; false on any
//  follow-node failure or JumpFollowNodesMap conflict.
bool SMPFunction::AnalyzeCompoundConditionalStatements(void) {
	bool Structured = true;

	// First, build a shadow of the CFG with successor block numbers.
	this->BuildShadowCFG();

	// Second, iterate until no more changes: Detect compound conditional exprs, mark and coalesce.
	bool changed;
	do {
		changed = false;
		for (size_t BlockIndex = 0; BlockIndex < this->ShadowCFGBlocks.size(); ++BlockIndex) {
			SMPBasicBlock *CurrBlock = this->RPOBlocks[BlockIndex];
			int CurrBlockNum = CurrBlock->GetNumber();
			if (!this->ShadowCFGBlocks[CurrBlockNum]->IsCoalesced() && CurrBlock->HasConditionalBranch()) {
				// Candidate for beginning a compound (a.k.a. short-circuit) conditional.

				// There are four patterns that cause the next block to be coalesced into this one
				//  as a part of the same compound conditional. These are shown in Figure 6-32 of
				//  Cristina Cifuentes' Ph.D. dissertation on decompilation. With x being the condition
				//  at the end of the first block, and y the condition at the end of the second block,
				//  and "x" below meaning "take branch from block with condition x" and ditto for "y",
				//  these four CFG patterns match the short-circuit evaluation of:
				//  1. x AND y  (i.e. taking the branch from x to y and then also from y leads to a dest. block,
				//               falling through from either one leads to a common second dest. block).
				//         x---+
				//         |   |
				//         |   y---+
				//         |  /    |
				//         | /     |
				//         FT1   NFTNFT1
				//
				//  2. x OR y   (i.e. taking branch from x, or falling through to y and taking branch from y,
				//               leads to first destination block, while falling through to y and then falling through
				//               from y leads to a second destination block)
				//  3. !x OR y  (i.e. falling through from x, or taking branch from x to y and then taking
				//               branch from y, leads to a first dest. block; falling through from y leads
				//               to a second destination block)
				//  4. !x AND y (i.e. falling through from x to y and taking branch from y leads to first
				//               dest. block; taking branch from x or falling through from y leads to second
				//               destination block).
				// Numbering the blocks 1 and 2, ending with exprs x and y respectively, we can identify the four
				//  patterns strictly by the FT (fallthrough) and NFT (non-fall-through) block patterns, observing
				//  that (a) there must be only four blocks and (b) block 2 must be only a conditional expression
				//  and (c) block 2 must only have one predecessor block (block 1):
				//  1. FT(1) = FT(NFT(1))   [implies NFT(1) == 2]
				//  2. NFT(1) = NFT(FT(1))  [implies FT(1) == 2]
				//  3. FT(1) = NFT(NFT(1))  [implies NFT(1) == 2]
				//  4. NFT(1) = FT(FT(1))   [implies FT(1) == 2]
				// The resulting CFG can be structured for the four cases as follows, using Ada short-circuit operators:
				// 1. if x and then y then
				//       NFT(NFT(1))
				//    else
				//       FT(1)
				//    endif
				// 2. if x or else y then
				//       NFT(1)
				//    else
				//       FT(FT(1))
				//    endif
				// 3. if not x or else y then
				//       FT(1)
				//    else
				//       FT(NFT(1))
				//    endif
				// 4. if not x and then y then
				//       NFT(FT(1))
				//    else
				//       NFT(1)
				//    endif
				//  We note that the detection of short-circuit operators is iterative, starting from
				//   RPO block 0 and going downward in the CFG. In example 1 above, the block FT(1) in
				//   the "else" branch could actually be the start of another short-circuit expression,
				//   and likewise for every block in a "then" or "else" branch for all four examples.
				int FT1BlockNum = this->ShadowCFGBlocks[CurrBlockNum]->GetExpr()->GetFallThroughBlockNum();
				int NFT1BlockNum = this->ShadowCFGBlocks[CurrBlockNum]->GetExpr()->GetNonFallThroughBlockNum();
				if ((SMP_BLOCKNUM_UNINIT != FT1BlockNum) && (SMP_BLOCKNUM_UNINIT != NFT1BlockNum)) {
					SMPBasicBlock *FT1Block = this->RPOBlocks[FT1BlockNum];
					SMPBasicBlock *NFT1Block = this->RPOBlocks[NFT1BlockNum];
					// Two of the four cases have a conditional-expression-only block 2 as FT(1). See if we
					//  have either case.
					if (FT1Block->IsSingleExpression() && (1 == FT1Block->GetNumPreds())) { // might be case 2 or case 4
						int FTFT1BlockNum = this->ShadowCFGBlocks[FT1BlockNum]->GetExpr()->GetFallThroughBlockNum();
						int NFTFT1BlockNum = this->ShadowCFGBlocks[FT1BlockNum]->GetExpr()->GetNonFallThroughBlockNum();
						assert(SMP_BLOCKNUM_UNINIT != FTFT1BlockNum);
						assert(SMP_BLOCKNUM_UNINIT != NFTFT1BlockNum);
						if (FTFT1BlockNum == NFT1BlockNum) { // case 4
							// Coalesce FT1Block condition into CurrBlock condition
							this->CoalesceShadowBlocks(CurrBlockNum, FT1BlockNum, true, SMP_LOGICAL_AND, NFT1BlockNum, NFTFT1BlockNum);
							changed = true;
						}
						else if (NFT1BlockNum == NFTFT1BlockNum) { // case 2
							// Coalesce FT1Block condition into CurrBlock condition
							this->CoalesceShadowBlocks(CurrBlockNum, FT1BlockNum, false, SMP_LOGICAL_OR, FTFT1BlockNum, NFT1BlockNum);
							changed = true;
						}
					}
					else if (NFT1Block->IsSingleExpression() && (1 == NFT1Block->GetNumPreds())) { // might be case 1 or case 3
						int FTNFT1BlockNum = this->ShadowCFGBlocks[NFT1BlockNum]->GetExpr()->GetFallThroughBlockNum();
						int NFTNFT1BlockNum = this->ShadowCFGBlocks[NFT1BlockNum]->GetExpr()->GetNonFallThroughBlockNum();
						if (FT1BlockNum == FTNFT1BlockNum) { // case 1
							// Coalesce NFT1Block condition into CurrBlock condition
							this->CoalesceShadowBlocks(CurrBlockNum, NFT1BlockNum, false, SMP_LOGICAL_AND, FT1BlockNum, NFTNFT1BlockNum);
							changed = true;
						}
						else if (FT1BlockNum == NFTNFT1BlockNum) { // case 3
							// Coalesce NFT1Block condition into CurrBlock condition
							this->CoalesceShadowBlocks(CurrBlockNum, NFT1BlockNum, true, SMP_LOGICAL_OR, FTNFT1BlockNum, FT1BlockNum);
							changed = true;
						}
					}
				}
			} // end if CurrBlock not coalesced and ends with conditional branch
		} // end for all blocks
	} while (changed);

	// Third, arrange the ShadowCFG into if-then-else order for later translation.
	//  We particularly want to avoid generating code that looks like:
	//  if (compound condition) then
	//     ;  // no code here
	//  else
	//     // code here
	//  endif;
	//  So, we prefer to have RPO order, with smaller block number in the then-branch.
	for (size_t BlockIndex = 0; BlockIndex < this->ShadowCFGBlocks.size(); ++BlockIndex) {
		STARSCFGBlock *CurrCFGBlock = ShadowCFGBlocks[BlockIndex];
		if (!CurrCFGBlock->IsCoalesced() && (SMP_NULL_OPERATOR != CurrCFGBlock->GetExpr()->GetOperator())) {
			// We have the header block of a short circuit expression.
			//  Distinguish between loop exits and if-then-else statements.
			int OrigBlockNum = CurrCFGBlock->GetOriginalBlockNum();
			assert(0 <= OrigBlockNum);
			SMPBasicBlock *OrigBlock = this->GetBlockByNum((size_t) OrigBlockNum);
			STARS_ea_t LastAddr = OrigBlock->GetLastAddr();
			ControlFlowType LastCFType = this->GetControlFlowType(LastAddr);
			if (SHORT_CIRCUIT_BRANCH == LastCFType) { // not SHORT_CIRCUIT_LOOP_EXIT
				int FollowBlockNum = this->FindConditionalFollowNode((int) BlockIndex);
				if (SMP_BLOCKNUM_UNINIT == FollowBlockNum) {
					SMP_msg("ERROR: Cannot find follow block for SHORT_CIRCUIT_BRANCH in block %zu in %s\n",
						BlockIndex, this->GetFuncName());
					Structured = false;
					break;
				}
				else if (SMP_BLOCKNUM_COMMON_RETURN == FollowBlockNum) {
					SMP_msg("INFO: Common return follow block for SHORT_CIRCUIT_BRANCH in block %zu in %s\n",
						BlockIndex, this->GetFuncName());
					continue; // no code re-arranging to do
				}
				STARSCondExpr *CurrExpr = CurrCFGBlock->GetExpr();
				int FTBlockNum = CurrExpr->GetFallThroughBlockNum();
				int NFTBlockNum = CurrExpr->GetNonFallThroughBlockNum();
				// Check for the if-then case with no else-block.
				bool IfThenCase = false;
				if ((NFTBlockNum == FollowBlockNum) || (FTBlockNum == FollowBlockNum)) {
					IfThenCase = true;
					// Ensure that we arrange it so that it translates directly to:
					//   if (compound condition) then
					//      do_something;
					//   endif;
					assert(NFTBlockNum != FTBlockNum); // got to have some code in there somewhere to execute
					if (FTBlockNum == FollowBlockNum) {
						// Current structure is:
						//   if (compound condition) then
						//       goto NFTBlock;
						//   FTBlock:
						//   NFTBlock:
						//       [code here]
						//       goto FTBlock;  // all messed up
						// Need to invert the condition and swap FT and NFT blocks.
						// if (inverted compound condition) then goto FTBlock;
						//    NFTBlock:
						//    [code here]
						// endif;
						// FTBlock:
						if (FTBlockNum > NFTBlockNum) {  // follow node is IDom of condition node, RPO # is greater than if-else blocks.
							STARSCondExpr *NewExpr = new STARSCondExpr();
							NewExpr->SetLeftExpr(CurrExpr, false);
							NewExpr->SetOperator(SMP_BITWISE_NOT);  // too much trouble to introduce new operator SMP_LOGICAL_NOT
							NewExpr->SetFTBlockNum(NFTBlockNum);
							NewExpr->SetNFTBlockNum(FollowBlockNum);
							// NOTE(review): unlike the if-then-else rewrite below, this NewExpr
							//  never receives SetCFGBlock(CurrCFGBlock) — confirm whether that
							//  omission is intentional or an oversight.
							CurrCFGBlock->SetExpr(NewExpr);
						}
						else {
							Structured = false;
							SMP_msg("ERROR: In inverted SHORT_CIRCUIT_BRANCH at block %zu FTBlockNum: %d not > than NFTBlockNum: %d in %s\n",
								BlockIndex, FTBlockNum, NFTBlockNum, this->GetFuncName());
							break;
						}
					}
				}
				else {
					// if-then-else structure, but we usually want the ASM structure to be:
					//  jcond NFTBlock
					//  FTBlock here
					//  jmp FollowBlock
					//  NFTBlock:
					//  [code, then fall through to FollowBlock]
					//  FollowBlock here
					//
					// So, we want whichever block falls through to the FollowBlock to be either the NFTBlock
					//  or to be dominated by the NFTBlock (NFTBlock could start a loop or any other structure
					//  besides a simple basic block).
					int FTPredBlockNum = this->GetFallThroughPredBlockNum(FollowBlockNum);
					if (SMP_BLOCKNUM_UNINIT != FTPredBlockNum) {
						// Is FTPredBlockNum equal to FTBlock, or dominated by FTBlock?
						if (this->DoesBlockDominateBlock(FTBlockNum, FTPredBlockNum)) {
							// Need to switch FT and NFT and invert the condition.
							STARSCondExpr *NewExpr = new STARSCondExpr();
							NewExpr->SetLeftExpr(CurrExpr, false);
							NewExpr->SetOperator(SMP_BITWISE_NOT);  // too much trouble to introduce new operator SMP_LOGICAL_NOT
							NewExpr->SetFTBlockNum(NFTBlockNum);
							NewExpr->SetNFTBlockNum(FTBlockNum);
							NewExpr->SetCFGBlock(CurrCFGBlock);
							CurrCFGBlock->SetExpr(NewExpr);
						}
					}
				} // end if if-then-case else if-then-else case
				// Mark unconditional jumps and branches to the follow node in the JumpFollowNodes map.
				SMPBasicBlock *FollowBlock = this->RPOBlocks[FollowBlockNum];
				for (list<SMPBasicBlock *>::iterator PredIter = FollowBlock->GetFirstPred(); PredIter != FollowBlock->GetLastPred(); ++PredIter) {
					if ((*PredIter)->HasDirectJump() || (*PredIter)->HasConditionalBranch()) {
						STARS_ea_t LastAddr = (*PredIter)->GetLastAddr();
						assert(STARS_BADADDR != LastAddr);
						pair<STARS_ea_t, int> MapItem(LastAddr, FollowBlockNum);
						pair<map<STARS_ea_t, int>::iterator, bool> InsertResult = this->JumpFollowNodesMap.insert(MapItem);
						if (!InsertResult.second) {
							// Were we inserting the same follow node again, or a conflicting follow node?
							map<STARS_ea_t, int>::iterator ExistingValueIter = InsertResult.first;
							int OldFollowBlockNum = ExistingValueIter->second;
							if (OldFollowBlockNum != FollowBlockNum) {
								SMP_msg("ERROR: SPARK CFG JumpFollowNodesMap conflict at %llx Old: %d New:%d\n", 
									(uint64_t) LastAddr, OldFollowBlockNum, FollowBlockNum);
								Structured = false;
								break;
							}
							else {
								SMP_msg("INFO: SPARK CFG JumpFollowNodesMap redundant update at %llx\n", (uint64_t) LastAddr);
							}
						}
					}
				}
				// Map header block COND_BRANCH to the follow node
				// (BlockIndex is the header block's RPO number; see loop above.)
				if (!IfThenCase) {
					SMPBasicBlock *HeaderBlock = this->GetBlockByNum(BlockIndex);
					STARS_ea_t LastAddr = HeaderBlock->GetLastAddr();
					pair<STARS_ea_t, int> MapItem(LastAddr, FollowBlockNum);
					pair<map<STARS_ea_t, int>::iterator, bool> InsertResult = this->JumpFollowNodesMap.insert(MapItem);
					if (!InsertResult.second) {
						// Were we inserting the same follow node again, or a conflicting follow node?
						map<STARS_ea_t, int>::iterator ExistingValueIter = InsertResult.first;
						int OldFollowBlockNum = ExistingValueIter->second;
						if (OldFollowBlockNum != FollowBlockNum) {
							SMP_msg("ERROR: SPARK CFG JumpFollowNodesMap conflict at %llx Old: %d New:%d\n",
								(uint64_t)LastAddr, OldFollowBlockNum, FollowBlockNum);
							Structured = false;
							break;
						}
						else {
							SMP_msg("INFO: SPARK CFG JumpFollowNodesMap redundant update at %llx\n", (uint64_t)LastAddr);
						}
					}
				}
			} // end if SHORT_CIRCUIT_BRANCH i.e. not SHORT_CIRCUIT_LOOP_EXIT
		}  // end if block is not coalesced and has a non-null operator (implies head of compound conditional)
	} // end for all ShadowCFGBlocks

	return Structured;
} // end of SMPFunction::AnalyzeCompoundConditionalStatements()

// Analyze if-then-else starting at COND_BRANCH at end of CurrBlock; return false if not well-structured
bool SMPFunction::AnalyzeConditionalStatements(void) {

	// First, mark the short-circuit compound conditionals.
	bool StructuredConditional = this->AnalyzeCompoundConditionalStatements();
	if (!StructuredConditional)
		return false;  // failed already, don't waste time

	// Algorithm is based on Figure 6-31 from Cristina Cifuentes' dissertation.
	//  We want to find innermost conditionals first, so we traverse the CFG in post-order.
	//  When we find a COND_BRANCH, we want to determine the type of conditional (if-then or
	//  if-then-else) and find its follow block.The follow block will have the COND_BRANCH block
	//  as its IDom, will have two or more predecessors, and will be the greatest such RPO-numbered block.
	//  If the COND_BRANCH goes straight to the follow block, we have an if-then; otherwise, we have if-then-else.
	//  We want to record the follow block for each COND_BRANCH (note that a block can be the follow
	//  block for more than one nested conditional).

	// Traverse CFG in post-order sequence and find COND_BRANCH statements and analyze them.

	vector<int> UnresolvedBranchBlocks;  // for tracking elsif branches until we reach the if block that dominates them
	for (vector<SMPBasicBlock *>::reverse_iterator BlockIter = this->RPOBlocks.rbegin(); BlockIter != this->RPOBlocks.rend(); ++BlockIter) {
		SMPBasicBlock *CurrBlock = (*BlockIter);
		SMPInstr *LastInst = (*(CurrBlock->GetRevInstBegin()));
		STARS_ea_t LastAddr = LastInst->GetAddr();
		SMPitype FlowType = LastInst->GetDataFlowType();
		if (FlowType == COND_BRANCH) {
			ControlFlowType LastCFType = this->GetControlFlowType(LastAddr);
			if (FALL_THROUGH != LastCFType) { // already part of some loop or switch structure
				continue;
			}
			int HeadBlockNum = CurrBlock->GetNumber();
			int FollowNodeNum = this->FindConditionalFollowNode(HeadBlockNum);
			if (FollowNodeNum == SMP_BLOCKNUM_UNINIT) {
				UnresolvedBranchBlocks.push_back(HeadBlockNum);
#if 1
				// We should no longer need the resolution of UnresolvedBranchBlocks now
				//  that FindConditionalFollowNode() uses the FindTerminus method as a last
				//  resort. Any time we return SMP_BLOCKNUM_UNINIT, we have failed to find
				//  a follow node and we have unstructured control flow.
				break;
#endif
			}
			else if (FollowNodeNum == SMP_BLOCKNUM_COMMON_RETURN) {
				// Insert mapping from COND_BRANCH to follow block.
				pair<STARS_ea_t, int> MapItem(LastAddr, FollowNodeNum);
				pair<map<STARS_ea_t, int>::iterator, bool> InsertResult = this->JumpFollowNodesMap.insert(MapItem);
				if (!InsertResult.second) {
					// Were we inserting the same follow node again, or a conflicting follow node?
					map<STARS_ea_t, int>::iterator ExistingValueIter = InsertResult.first;
					int OldFollowBlockNum = ExistingValueIter->second;
					if (OldFollowBlockNum != FollowNodeNum) {
						SMP_msg("ERROR: SPARK CFG JumpFollowNodesMap conflict at %llx Old: %d New:%d\n",
							(uint64_t) LastAddr, OldFollowBlockNum, FollowNodeNum);
						StructuredConditional = false;
						break;
					}
					else {
						SMP_msg("INFO: SPARK CFG JumpFollowNodesMap redundant update at %llx\n", (uint64_t) LastAddr);
					}
				}

				this->SetControlFlowType(LastAddr, BRANCH_IF_THEN_ELSE);
				// Any unresolved branches were elsif branches for the current follow node.
				for (size_t ElsifIndex = 0; ElsifIndex < UnresolvedBranchBlocks.size(); ++ElsifIndex) {
					// Insert mappping from COND_BRANCH to follow block.
					int ElsifBlockNum = UnresolvedBranchBlocks[ElsifIndex];
					SMPBasicBlock *ElsifBlock = this->GetBlockByNum(ElsifBlockNum);
					assert(NULL != ElsifBlock);
					STARS_ea_t ElsifAddr = ElsifBlock->GetLastAddr();
					assert(STARS_BADADDR != ElsifAddr);
					pair<STARS_ea_t, int> MapItem(ElsifAddr, FollowNodeNum);
					pair<map<STARS_ea_t, int>::iterator, bool> InsertResult = this->JumpFollowNodesMap.insert(MapItem);
					if (!InsertResult.second) {
						// Were we inserting the same follow node again, or a conflicting follow node?
						map<STARS_ea_t, int>::iterator ExistingValueIter = InsertResult.first;
						int OldFollowBlockNum = ExistingValueIter->second;
						if (OldFollowBlockNum != FollowNodeNum) {
							SMP_msg("ERROR: SPARK CFG JumpFollowNodesMap conflict at %llx Old: %d New:%d\n",
								(uint64_t) ElsifAddr, OldFollowBlockNum, FollowNodeNum);
							StructuredConditional = false;
							break;
						}
						else {
							SMP_msg("INFO: SPARK CFG JumpFollowNodesMap redundant update at %llx\n", (uint64_t) ElsifAddr);
						}
					}
				} // end for each UnresolvedBranchBlock
				if (!UnresolvedBranchBlocks.empty()) {
					SMP_msg("INFO: SPARK CFG Resolved to ELSIF at %llx\n", (uint64_t) LastAddr);
				}
				UnresolvedBranchBlocks.clear();
			}
			else { // Found a good follow node.
				assert((0 <= FollowNodeNum) && (FollowNodeNum < (int) this->RPOBlocks.size()));
				SMPBasicBlock *FollowBlock = this->GetBlockByNum((size_t) FollowNodeNum);
				assert(NULL != FollowBlock);
				bool IfThenCase = FollowBlock->IsBlockPred(HeadBlockNum);
				if (IfThenCase) {
					this->SetControlFlowType(LastAddr, BRANCH_IF_THEN);
					if (!UnresolvedBranchBlocks.empty()) { // error; cannot have elsif branches for IfThenCase
						SMP_msg("ERROR: if-then-endif at %llx has unresolved branches inside.\n",
							(unsigned long long) LastAddr);
						StructuredConditional = false;
						break;
					}
					// Ensure that we won't assert when trying to generate code. For the
					//  normal if-then, we fall through to the then-block and branch to
					//  the follow block. For the OddIfThenCase, we fall through to the
					//  follow block and branch to the then-block.
					int DistantBlockNum = (*(CurrBlock->GetCondNonFallThroughSucc()))->GetNumber();
					bool OddIfThenCase = LastInst->IsOddIfThenCase();
					if (!OddIfThenCase) {
						if (DistantBlockNum != FollowNodeNum) {
							SMP_msg("ERROR: DistantBlock != FollowBlock for normal if-then at %llx in %s\n",
								(uint64_t) LastInst->GetAddr(), this->GetFuncName());
							this->HasStructuredCFG = false;
						}
					}
					else if (DistantBlockNum == FollowNodeNum) {
						SMP_msg("ERROR: DistantBlock == FollowBlock for OddIfThenCase at %llx in %s\n",
							(uint64_t) LastInst->GetAddr(), this->GetFuncName());
						this->HasStructuredCFG = false;
					}
				}
				else {
					this->SetControlFlowType(LastAddr, BRANCH_IF_THEN_ELSE);
					// Any unresolved branches were elsif branches for the current follow node.
					for (size_t ElsifIndex = 0; ElsifIndex < UnresolvedBranchBlocks.size(); ++ElsifIndex) {
						// Insert mappping from COND_BRANCH to follow block.
						int ElsifBlockNum = UnresolvedBranchBlocks[ElsifIndex];
						SMPBasicBlock *ElsifBlock = this->GetBlockByNum(ElsifBlockNum);
						assert(NULL != ElsifBlock);
						STARS_ea_t ElsifAddr = ElsifBlock->GetLastAddr();
						assert(STARS_BADADDR != ElsifAddr);
						pair<STARS_ea_t, int> MapItem(ElsifAddr, FollowNodeNum);
						pair<map<STARS_ea_t, int>::iterator, bool> InsertResult = this->JumpFollowNodesMap.insert(MapItem);
						if (!InsertResult.second) {
							// Were we inserting the same follow node again, or a conflicting follow node?
							map<STARS_ea_t, int>::iterator ExistingValueIter = InsertResult.first;
							int OldFollowBlockNum = ExistingValueIter->second;
							if (OldFollowBlockNum != FollowNodeNum) {
								SMP_msg("ERROR: SPARK CFG JumpFollowNodesMap conflict at %llx Old: %d New:%d\n",
									(uint64_t) ElsifAddr, OldFollowBlockNum, FollowNodeNum);
								StructuredConditional = false;
								break;
							}
							else {
								SMP_msg("INFO: SPARK CFG JumpFollowNodesMap redundant update at %llx\n", (uint64_t) ElsifAddr);
							}
						}
						// Find out if the unresolved branch should be translated as beginning a nested if-then, or a nested
						//  if-then-else. The key is whether the COND_BRANCH goes to the follow node directly (if-then) or
						//  not (if-then-else).
						bool NestedIfThenCase = FollowBlock->IsBlockPred(ElsifBlockNum);
						if (NestedIfThenCase) {
							this->SetControlFlowType(ElsifAddr, BRANCH_IF_THEN);
						}
						else {
							this->SetControlFlowType(ElsifAddr, BRANCH_IF_THEN_ELSE);
						}
					} // end for each UnresolvedBranchBlock
					UnresolvedBranchBlocks.clear();
				} // end if (IfThenCase) ... else ...
				// Insert mapping from COND_BRANCH to follow block.
				pair<STARS_ea_t, int> MapItem(LastAddr, FollowNodeNum);
				pair<map<STARS_ea_t, int>::iterator, bool> InsertResult = this->JumpFollowNodesMap.insert(MapItem);
				if (!InsertResult.second) {
					// Were we inserting the same follow node again, or a conflicting follow node?
					map<STARS_ea_t, int>::iterator ExistingValueIter = InsertResult.first;
					int OldFollowBlockNum = ExistingValueIter->second;
					if (OldFollowBlockNum != FollowNodeNum) {
						SMP_msg("ERROR: SPARK CFG JumpFollowNodesMap conflict at %llx Old: %d New:%d\n",
							(uint64_t) LastAddr, OldFollowBlockNum, FollowNodeNum);
						StructuredConditional = false;
						break;
					}
					else {
						SMP_msg("INFO: SPARK CFG JumpFollowNodesMap redundant update at %llx\n", (uint64_t) LastAddr);
					}
				}
			} // end if (bad follownode) ... else ...
		} // end if COND_BRANCH
	} // end for all blocks in reverse iteration

	// We should have no unresolved elsif branches
	return (StructuredConditional && UnresolvedBranchBlocks.empty());
} // end of SMPFunction::AnalyzeConditionalStatements()

// Find candidate block # for if-else follow node for HeadBlockNum; return -1 otherwise
//  NOTE: this can also return SMP_BLOCKNUM_COMMON_RETURN, propagated up from
//  TrackConditionalBranchTerminus() when both arms of the conditional dead-end in
//  return blocks; callers must be prepared to handle that value as well.
//  Tries three strategies in order: (1) dominator-tree scan, (2) common loop-back
//  detection, (3) tracking both branch arms to a common terminus block.
int SMPFunction::FindConditionalFollowNode(int HeadBlockNum) {
	int FollowBlockNum = SMP_BLOCKNUM_UNINIT;

	// A well-structured candidate would be the biggest RPO-number block that has >= 2 predecessors
	//  and has HeadBlockNum as its IDom. 
	// EXCEPTION #1: Return instructions interrupt control flow. See below.
	// EXCEPTION #2: Short circuit conditionals can trick the IDom algorithm into choosing
	//   one of the blocks in the short circuit expression as the follow node, when the follow
	//   node should come after the entire short circuit expr.

	SMPBasicBlock *HeadBlock = this->RPOBlocks[HeadBlockNum];
	assert(nullptr != HeadBlock);
	STARS_ea_t HeadLastAddr = HeadBlock->GetLastAddr();
	ControlFlowType LastCFType = this->GetControlFlowType(HeadLastAddr);
	bool ShortCircuit = ((LastCFType == SHORT_CIRCUIT_BRANCH) || (LastCFType == SHORT_CIRCUIT_LOOP_EXIT));

	// Strategy #1: dominator-tree scan. Skipped for short-circuit heads per EXCEPTION #2 above.
	if (!ShortCircuit) {
		// Go through IDoms in reverse order until we find a well-structured candidate or hit HeadBlockNum.
		int IDomIndex = (int) this->IDom.size() - 1;
		while (IDomIndex > HeadBlockNum) {
			if (this->IDom[IDomIndex] == HeadBlockNum) {
				// COND_BRANCH block is the IDom for block # IDomIndex.
				if (2 <= this->RPOBlocks[IDomIndex]->GetNumPredsMinusBackEdges()) { // success
					FollowBlockNum = IDomIndex;
					break;
				}
			}
			--IDomIndex;
		}
	}
	// The disabled region below is the old handling of if-then-else arms that both end
	//  in returns; it has been superseded by the TrackConditionalBranchTerminus() logic
	//  at the bottom of this function, and is kept only for reference.
#if 0
	// If we failed, see if it was because of both branches of an if-then-else
	//  terminating with a RETURN. A call to a function that does not return, such
	//  as exit() or abort(), also behaves like a return here; both are control flow sinks.
	if (SMP_BLOCKNUM_UNINIT == FollowBlockNum) {
		IDomIndex = (int) this->IDom.size() - 1;
		size_t ReturnCounter = 0;
		int BlockNum = (int) this->RPOBlocks.size();
		int SaveBlockNum = SMP_BLOCKNUM_UNINIT;
		--BlockNum;
		while (BlockNum > HeadBlockNum) {
			SMPBasicBlock *CurrBlock = this->RPOBlocks[(size_t) BlockNum];
			if (CurrBlock->HasReturn() || CurrBlock->HasNonReturningCall()) {
				if (this->DoesBlockDominateBlock(HeadBlockNum, BlockNum)) {
					++ReturnCounter;
					SaveBlockNum = BlockNum;
				}
			}
			--BlockNum;
		}
	
		if (ReturnCounter > 1) {
			FollowBlockNum = SMP_BLOCKNUM_COMMON_RETURN;
		}
		else if (1 == ReturnCounter) {
			// We must have an if-then with a return at the end of the then-clause.
			//  Confirm this pattern in the CFG first.
			assert(0 <= SaveBlockNum);
			SMPBasicBlock *CurrBlock = this->RPOBlocks[(size_t) HeadBlockNum];
			// HeadBlockNum should have two successors. One dominates SaveBlockNum, and
			//  that is the then-clause direction; the other successor is the target of
			//  jumping around the then-clause and is our follow node.
			assert(2 == CurrBlock->GetNumSuccessors());
			list<SMPBasicBlock *>::const_iterator SuccBlock = CurrBlock->GetCondNonFallThroughSucc();
			// Normal case is to jump around the then-clause.
			int SuccBlockNum = (*SuccBlock)->GetNumber();
			if (!this->DoesBlockDominateBlock(SuccBlockNum, SaveBlockNum)) {
				FollowBlockNum = SuccBlockNum;
			}
			else { // test other branch; not sure how this could fail unless we have unstructured code.
				SuccBlock = CurrBlock->GetFallThroughSucc();
				int SuccBlockNum = (*SuccBlock)->GetNumber();
				if (!this->DoesBlockDominateBlock(SuccBlockNum, SaveBlockNum)) {
					FollowBlockNum = SuccBlockNum;
				}
			}
		}
	}
#endif

	// Strategy #2: a similar case to the above is when both branches of an if-then-else flow into a loop header block
	//  for a loop that contains both branches.
	if ((SMP_BLOCKNUM_UNINIT == FollowBlockNum) && (0 < this->LoopCount)) {
		// See if we can find two blocks, each dominated by HeadBlockNum,
		//  each being loop tail blocks, with the same loop header block
		//  as a successor block, with the loop header block dominating HeadBlockNum.
		size_t LoopBackCounter = 0;
		int BlockNum = (int) this->RPOBlocks.size();
		int SaveLoopHeadBlockNum = SMP_BLOCKNUM_UNINIT;
		int LoopHeadBlockNum = SMP_BLOCKNUM_UNINIT;
		--BlockNum;
		while (BlockNum > HeadBlockNum) {
			SMPBasicBlock *CurrBlock = this->RPOBlocks[(size_t)BlockNum];
			if (CurrBlock->IsLoopTailBlock() && this->DoesBlockDominateBlock(HeadBlockNum, BlockNum)) {
				// NOTE(review): LoopHeadBlockNum is read below after this call, so
				//  HasLoopHeadAsSuccessor() presumably returns the loop header number
				//  through a by-reference out-parameter — confirm in SMPBasicBlock.h.
				if (CurrBlock->HasLoopHeadAsSuccessor(LoopHeadBlockNum)) {
					if (this->DoesBlockDominateBlock(LoopHeadBlockNum, BlockNum)) {
						if (0 == LoopBackCounter) {
							SaveLoopHeadBlockNum = LoopHeadBlockNum;
						}
						else {
							if (SaveLoopHeadBlockNum != LoopHeadBlockNum) {
#if 0
								SMP_msg("WARNING: Inconsistent loop-back HeadBlockNums for conditional block %d in func at %llx\n",
									HeadBlockNum, (uint64_t) this->GetFirstFuncAddr());
								this->HasStructuredCFG = false;  // irrecoverable problem
								return SMP_BLOCKNUM_UNINIT;
#else
								LoopBackCounter = 0;
								FollowBlockNum = SMP_BLOCKNUM_UNINIT;
								break; // try to succeed using TrackConditionalBranchTerminus() below
#endif
							}
						}
						++LoopBackCounter;
					}
				}
			}
			--BlockNum;
		} // end of while (BlockNum > HeadBlockNum)
		if (2 <= LoopBackCounter) { // success; 2 or more if-elsif-else branches loop back to loop header
			FollowBlockNum = SaveLoopHeadBlockNum;
		}
	}

	// Strategy #3 (last resort): follow both arms of the branch until they meet.
	if (SMP_BLOCKNUM_UNINIT == FollowBlockNum) {
		// See if we can track the two branches of the COND_BRANCH to a sensible follow node.
		SMPBasicBlock *CurrBlock = this->GetBlockByNum((size_t) HeadBlockNum);
		assert(nullptr != CurrBlock);
		assert(2 == CurrBlock->GetNumSuccessors());
		int ElseBlockNum = (*(CurrBlock->GetCondNonFallThroughSucc()))->GetNumber();
		assert(SMP_BLOCKNUM_UNINIT != ElseBlockNum);
		int ThenBlockNum = (*(CurrBlock->GetCondOtherSucc(ElseBlockNum)))->GetNumber();
		assert(SMP_BLOCKNUM_UNINIT != ThenBlockNum);

		// For an if-then-else, both successors should be dominated by the branch header block.
		//  If one is dominated but the other is not, then the non-dominated block is the follow block
		//  and we just have an if-then structure, e.g.
		//    branch_block
		//        |
		//        +-----+
		//        |     |
		//   |    |    then_block
		//   |    |     |
		//   +--block2--+
		//   
		//   block2 is reachable from somewhere above branch_block. We don't want to start
		//   at block2 and search downwards, because block2 is already the follow block for
		//   branch_block.
		int ThenIDom = this->IDom[(size_t) ThenBlockNum];
		int ElseIDom = this->IDom[(size_t) ElseBlockNum];
		bool IfThenElseCase = ((ThenIDom == ElseIDom) && (ThenIDom == HeadBlockNum));
		if (!IfThenElseCase) {
			// See if we fit the pattern above.
			if (ThenIDom == HeadBlockNum)
				FollowBlockNum = ElseBlockNum;
			else if (ElseIDom == HeadBlockNum)
				FollowBlockNum = ThenBlockNum;
			else {
				SMP_msg("ERROR: FindConditionalFollowNode in %s has no successors with IDom of HeadBlockNum %d\n",
					this->GetFuncName(), HeadBlockNum);
				FollowBlockNum = SMP_BLOCKNUM_UNINIT; // error signal
			}
		}
		else {
			// Track each arm separately; each arm gets its own visited-block bitset,
			//  and the revisit counter is reset between the two traversals.
			STARSBitSet ThenBlocksSeen, ElseBlocksSeen;
			int BlockAlreadySeenCounter = 0;
			ThenBlocksSeen.AllocateBits(this->GetNumBlocks());
			ElseBlocksSeen.AllocateBits(this->GetNumBlocks());
			int ThenFollowBlockNum = this->TrackConditionalBranchTerminus(HeadBlockNum, ThenBlockNum, ThenBlocksSeen, BlockAlreadySeenCounter);
			BlockAlreadySeenCounter = 0;
			int ElseFollowBlockNum = this->TrackConditionalBranchTerminus(HeadBlockNum, ElseBlockNum, ElseBlocksSeen, BlockAlreadySeenCounter);
			if ((SMP_BLOCKNUM_UNINIT != ThenFollowBlockNum) && (SMP_BLOCKNUM_UNINIT != ElseFollowBlockNum)) {
				// No errors; compute the meet function.
				//  A non-negative terminus is a real block number; a negative one here
				//  must be SMP_BLOCKNUM_COMMON_RETURN (that arm dead-ends in returns).
				if ((0 <= ThenFollowBlockNum) && (0 <= ElseFollowBlockNum)) {
					if (ThenFollowBlockNum == ElseFollowBlockNum) {
						// Why didn't we get this follow block from the dominator info?
						FollowBlockNum = ThenFollowBlockNum;
						if (ShortCircuit)
							SMP_msg("INFO: Got FollowBlockNum of %d for ShortCircuit block %d in %s via consistent termini.\n",
								FollowBlockNum, HeadBlockNum, this->GetFuncName());
						else
							SMP_msg("WARNING: Got FollowBlockNum of %d for block %d in %s via consistent termini after failing via dominator tree.\n",
								FollowBlockNum, HeadBlockNum, this->GetFuncName());
					}
					else {
						SMP_msg("ERROR: Inconsistent if-else termini block numbers: %d and %d for branch head: %d in func %s\n",
							ThenFollowBlockNum, ElseFollowBlockNum, HeadBlockNum, this->GetFuncName());
						FollowBlockNum = SMP_BLOCKNUM_UNINIT; // error signal
					}
				}
				else if (0 <= ThenFollowBlockNum) {
					FollowBlockNum = ThenFollowBlockNum;
					SMP_msg("INFO: Got FollowBlockNum of %d for block %d in %s via THEN terminus after failing via dominator tree.\n",
						FollowBlockNum, HeadBlockNum, this->GetFuncName());
				}
				else {
					// Note: if both arms returned SMP_BLOCKNUM_COMMON_RETURN, that value
					//  is what we return here, and the caller handles it.
					FollowBlockNum = ElseFollowBlockNum;
					SMP_msg("INFO: Got FollowBlockNum of %d for block %d in %s via ELSE terminus after failing via dominator tree.\n",
						FollowBlockNum, HeadBlockNum, this->GetFuncName());
				}
			}
			else {
				FollowBlockNum = SMP_BLOCKNUM_UNINIT;
				SMP_msg("ERROR: No FollowBlockNum for block %d in %s due to inconsistent termini after failing via dominator tree.\n",
					HeadBlockNum, this->GetFuncName());
			}
		}
	}

	return FollowBlockNum;
} // end of SMPFunction::FindConditionalFollowNode()

// Track CurrBlockNum until we reach block that BranchHeadBlockNum doesn't dominate, or dead end. Return block num, possibly SMP_BLOCKNUM_COMMON_RETURN.
//  A dead end is a block that returns or that calls a non-returning function.
//  We follow successors and we don't follow loop back edges.
//  Params:
//   BranchHeadBlockNum - block ending in the COND_BRANCH whose follow node is sought
//   CurrBlockNum - block currently being expanded along one arm of that branch
//   BlocksSeen - bitset of block numbers already processed; shared (by reference)
//     across all recursive calls for one arm, so no block is re-expanded
//   BlockAlreadySeenCounter - in/out counter of repeat encounters; the search
//     aborts once it exceeds STARS_COND_BRANCH_ALREADY_SEEN_LIMIT
//  Returns: the first block NOT dominated by BranchHeadBlockNum (the candidate
//   follow node), SMP_BLOCKNUM_COMMON_RETURN if every path dead-ends, or
//   SMP_BLOCKNUM_UNINIT on error (inconsistent termini, work-list explosion, or
//   looping). Some error paths also clear this->HasStructuredCFG.
int SMPFunction::TrackConditionalBranchTerminus(int BranchHeadBlockNum, int CurrBlockNum, STARSBitSet &BlocksSeen, int &BlockAlreadySeenCounter) {
	// Start with the dead-end value; a real terminus block number replaces it
	//  the first time we escape the dominated region.
	int TerminusBlockNum = SMP_BLOCKNUM_COMMON_RETURN;
	SMPBasicBlock *CurrBlock = this->GetBlockByNum((size_t) CurrBlockNum);
	assert(CurrBlock != nullptr);
	if (CurrBlock->HasReturn() || CurrBlock->HasNonReturningCall()) {
		TerminusBlockNum = SMP_BLOCKNUM_COMMON_RETURN;
	}
	else {
		// Seed the work list with the successors we are allowed to follow,
		//  excluding loop back edges (and, for inverted loop exits, staying on
		//  the fall-through path that leaves the loop).
		list<int> WorkListBlockNums;
		STARS_ea_t LastAddr = CurrBlock->GetLastAddr();
		ControlFlowType LastCFType = this->GetControlFlowType(LastAddr);
		if (LastCFType == LOOP_BACK) {
			// Don't follow back edge, but get the other successor, if conditional.
			vector<SMPInstr *>::const_reverse_iterator LastInstIter = CurrBlock->GetRevInstCBegin();
			if (COND_BRANCH == (*LastInstIter)->GetDataFlowType()) {
				assert(CurrBlock->IsLoopTailBlock());
				if (!CurrBlock->IsDoubleLoopTailBlock()) {
					for (list<SMPBasicBlock *>::const_iterator SuccIter = CurrBlock->GetFirstConstSucc(); SuccIter != CurrBlock->GetLastConstSucc(); ++SuccIter) {
						int SuccBlockNum = (*SuccIter)->GetNumber();
						if (SuccBlockNum != CurrBlock->GetLoopHeaderNumber()) {
							WorkListBlockNums.push_back(SuccBlockNum);
						}
					}
				}
			}
		}
		else if (LastCFType == INVERTED_LOOP_EXIT) {
			// COND_BRANCH taken would remain in the loop; fall-through will exit the loop.
#if 0	// Could do INVERTED_LOOP_EXIT by falling out of any block in the loop, not just the header block.
			assert(CurrBlock->IsLoopHeaderBlock());
#endif
			list<SMPBasicBlock *>::const_iterator SuccIter = CurrBlock->GetFallThroughSucc();
			assert(SuccIter != CurrBlock->GetLastConstSucc());
			int SuccBlockNum = (*SuccIter)->GetNumber();
			WorkListBlockNums.push_back(SuccBlockNum);
		}
		else { // add all successors to the work list
			for (list<SMPBasicBlock *>::const_iterator SuccIter = CurrBlock->GetFirstConstSucc(); SuccIter != CurrBlock->GetLastConstSucc(); ++SuccIter) {
				int SuccBlockNum = (*SuccIter)->GetNumber();
				WorkListBlockNums.push_back(SuccBlockNum);
			}
		}
// Safety valves: abort the search rather than loop or blow up on pathological CFGs.
#define STARS_COND_BRANCH_WORKLIST_LIMIT 200
#define STARS_COND_BRANCH_ALREADY_SEEN_LIMIT 50
		while (!WorkListBlockNums.empty()) { // recurse
			int SuccBlockNum = WorkListBlockNums.back();
			WorkListBlockNums.pop_back();
			bool BlockAlreadyInList = IsIntInList(WorkListBlockNums, SuccBlockNum);
			bool BlockAlreadyProcessed = BlocksSeen.GetBit((size_t) SuccBlockNum);
			if (BlockAlreadyProcessed)
				++BlockAlreadySeenCounter;
#if 0
			if (BlockAlreadyProcessed || BlockAlreadyInList) {
#else
			if (BlockAlreadyInList) {
#endif
				// We are looping around the same blocks, which should 
				//  not happen for a conditional branch. Terminate.
				SMP_msg("ERROR: Looping on branch terminus block numbers: %d and %d for branch head: %d current block: %d Bitset: %d InList: %d in func %s\n",
					CurrBlockNum, SuccBlockNum, BranchHeadBlockNum, CurrBlockNum, BlockAlreadyProcessed, BlockAlreadyInList, this->GetFuncName());
				TerminusBlockNum = SMP_BLOCKNUM_UNINIT; // error signal
				WorkListBlockNums.clear();
				this->HasStructuredCFG = false; // recovery of blocks into if-elsif-elsif-else is not possible
				break;
			}
			else if (STARS_COND_BRANCH_WORKLIST_LIMIT < WorkListBlockNums.size()) {
				SMP_msg("ERROR: WorkList explosion on branch terminus block numbers: %d and %d for branch head: %d current block: %d in func %s\n",
					CurrBlockNum, SuccBlockNum, BranchHeadBlockNum, CurrBlockNum, this->GetFuncName());
				TerminusBlockNum = SMP_BLOCKNUM_UNINIT; // error signal
				WorkListBlockNums.clear();
				this->HasStructuredCFG = false; // recovery of blocks into if-elsif-elsif-else is not possible
				break;
			}
			else if (STARS_COND_BRANCH_ALREADY_SEEN_LIMIT < BlockAlreadySeenCounter) {
				SMP_msg("ERROR: BlockAlreadySeen limit reached on branch terminus block numbers: %d and %d for branch head: %d current block: %d in func %s\n",
					CurrBlockNum, SuccBlockNum, BranchHeadBlockNum, CurrBlockNum, this->GetFuncName());
				TerminusBlockNum = SMP_BLOCKNUM_UNINIT; // error signal
				WorkListBlockNums.clear();
				this->HasStructuredCFG = false; // recovery of blocks into if-elsif-elsif-else is not possible
				break;
			}

			BlocksSeen.SetBit((size_t) SuccBlockNum);
			if (!this->DoesBlockDominateBlock(BranchHeadBlockNum, SuccBlockNum)) {
				// We reached a block that is not dominated by BranchHeadBlockNum.
				//  Check for consistency with previous such blocks.
				if (0 <= TerminusBlockNum) { // had previous terminus
					if (SuccBlockNum != TerminusBlockNum) {
						SMP_msg("ERROR: Inconsistent branch terminus block numbers: %d and %d for branch head: %d current block: %d in func %s\n",
							TerminusBlockNum, SuccBlockNum, BranchHeadBlockNum, CurrBlockNum, this->GetFuncName());
						TerminusBlockNum = SMP_BLOCKNUM_UNINIT; // error signal
						WorkListBlockNums.clear();
					}
				}
				else { // first good terminus
					TerminusBlockNum = SuccBlockNum;
				}
			}
			else { // still in path dominated by BranchHeadBlockNum
				// Recurse into the dominated successor; BlocksSeen and the counter
				//  are shared with the recursive call, so progress is monotonic.
				int NewTerminusBlockNum = this->TrackConditionalBranchTerminus(BranchHeadBlockNum, SuccBlockNum, BlocksSeen, BlockAlreadySeenCounter);
				// Perform the meet function over the old and new terminus block numbers.
				//  COMMON_RETURN is the identity element; two distinct real block
				//  numbers are a conflict.
				if (NewTerminusBlockNum == SMP_BLOCKNUM_UNINIT) {
					SMP_msg("ERROR: Bad branch terminus block number from recursion for branch head: %d current block: %d in func %s\n",
						BranchHeadBlockNum, CurrBlockNum, this->GetFuncName());
					TerminusBlockNum = SMP_BLOCKNUM_UNINIT; // error signal
					WorkListBlockNums.clear();
				}
				else if (TerminusBlockNum == SMP_BLOCKNUM_COMMON_RETURN) {
						TerminusBlockNum = NewTerminusBlockNum; //  new block num takes precedence; might be unchanged
				}
				else if (NewTerminusBlockNum == SMP_BLOCKNUM_COMMON_RETURN) {
					; // old block num takes precedence
				}
				else if (NewTerminusBlockNum != TerminusBlockNum) {
					SMP_msg("ERROR: Inconsistent branch terminus block numbers: %d and %d for branch head: %d current block: %d in func %s\n",
						TerminusBlockNum, NewTerminusBlockNum, BranchHeadBlockNum, CurrBlockNum, this->GetFuncName());
					TerminusBlockNum = SMP_BLOCKNUM_UNINIT; // error signal
					WorkListBlockNums.clear();
				}
			}
		} // end while work list is not empty
	}

	return TerminusBlockNum;
} // end of SMPFunction::TrackConditionalBranchTerminus()

// Find guarded loops and fill the GuardToLoopMap and LoopToGuardMap
void SMPFunction::FindGuardedLoops(void) {
	assert(this->HasStructuredControlFlow());
	// We look for the following code structure:
	// if (cond) then
	//   [straight-line code]
	//   loop
	//     [loop body]
	//   end loop;
	//   [any code]
	// end if;
	//
	// The key is detecting the sequence of a BRANCH_IF_THEN, then straight-line code, then a loop header block.
	//  When this sequence is detected, we map the first address in the loop header block to the BRANCH_IF_THEN
	//  address, and vice versa, in the two maps.
	for (size_t BlockIndex = 0; BlockIndex < this->RPOBlocks.size(); ++BlockIndex) {
		SMPBasicBlock *CurrBlock = this->GetBlockByNum(BlockIndex);
		if (CurrBlock->HasConditionalBranch()) {
			STARS_ea_t LastInstAddr = CurrBlock->GetLastAddr();
			assert(STARS_BADADDR != LastInstAddr);
			ControlFlowType LastCFType = this->GetControlFlowType(LastInstAddr);
			if (LastCFType == BRANCH_IF_THEN) {
				list<SMPBasicBlock *>::const_iterator FallThroughBlockIter = CurrBlock->GetFallThroughSucc();
				bool FoundGuardedLoop = false;
				STARS_ea_t LoopAddr = STARS_BADADDR;
				SMPBasicBlock *FallThroughBlock = (*FallThroughBlockIter);
				if (FallThroughBlock->IsLoopHeaderBlock()) {
					// The simplest case: no [straight-line code] between COND_BRANCH and loop header.
					FoundGuardedLoop = true;
					LoopAddr = FallThroughBlock->GetFirstNonMarkerAddr();
				}
				else {
					// FallThroughBlock might be a block that only falls through to the loop header block,
					//  the slightly less simple case shown as [straight-line code] in the comments above.
					if (1 == FallThroughBlock->GetNumSuccessors()) {
						STARS_ea_t LastFallThroughBlockAddr = FallThroughBlock->GetLastAddr();
						ControlFlowType FTLastCFType = this->GetControlFlowType(LastFallThroughBlockAddr);
						if (FALL_THROUGH == FTLastCFType) {
							// Just falling through to next block. Is it a loop header block?
							list<SMPBasicBlock *>::const_iterator NextBlockIter = FallThroughBlock->GetFallThroughSucc();
							if (NextBlockIter == FallThroughBlock->GetLastConstSucc()) {
								// Probably special case of direct jump that acts as fall-through control flow.
								SMPInstr *LastInst = this->GetInstFromAddr(LastFallThroughBlockAddr);
								assert(JUMP == LastInst->GetDataFlowType());
								NextBlockIter = FallThroughBlock->GetFirstConstSucc();
								assert(NextBlockIter != FallThroughBlock->GetLastConstSucc());
							}
							SMPBasicBlock *NextBlock = (*NextBlockIter);
							if (NextBlock->IsLoopHeaderBlock()) {
								// The less simple case: some [straight-line code] between COND_BRANCH and loop header.
								FoundGuardedLoop = true;
								LoopAddr = NextBlock->GetFirstNonMarkerAddr();
							}
						}
					}
				}
				if (FoundGuardedLoop) {
					assert(STARS_BADADDR != LoopAddr);
					assert(STARS_BADADDR != LastInstAddr);
					pair<STARS_ea_t, STARS_ea_t> GuardToLoopValue(LastInstAddr, LoopAddr);
					pair<map<STARS_ea_t, STARS_ea_t>::iterator, bool> InsertResult;
					pair<STARS_ea_t, STARS_ea_t> LoopToGuardValue(LoopAddr, LastInstAddr);
					InsertResult = this->GuardToLoopMap.insert(GuardToLoopValue);
					assert(InsertResult.second); // should never visit COND_BRANCH or loop address twice
					InsertResult = this->LoopToGuardMap.insert(LoopToGuardValue);
					assert(InsertResult.second); // should never visit COND_BRANCH or loop address twice
					SMP_msg("INFO: SPARK: Found guarded loop at %llx with guard at %llx\n", (uint64_t) LoopAddr, (uint64_t) LastInstAddr);
				}
			}
		} // end if block has conditional branch
	} // end for all basic blocks

	return;
} // end of SMPFunction::FindGuardedLoops()

// Are we already translating a SPARK_LOOP when we encounter another loop?
bool SMPFunction::IsSPARKLoopInTranslationStack(void) const {
	// Scan the control-structure stack from innermost (back) to outermost (front).
	for (list<SPARKTranslationCFType>::const_reverse_iterator CFIter = this->SPARKControlStack.crbegin();
		CFIter != this->SPARKControlStack.crend(); ++CFIter) {
		if (SPARK_LOOP == (*CFIter))
			return true;
	}
	return false;
} // end of SMPFunction::IsSPARKLoopInTranslationStack()

// Find memory writes (DEFs) with possible aliases
void SMPFunction::AliasAnalysis2(void) {
	// First task: Mark which memory DEFs MIGHT be aliased because an
	//  indirect memory write occurs somewhere in the DEF-USE chain.
	//  Memory DEF-USE chains with no possible aliasing can be subjected
	//  to type inference and type-based optimizing annotations, e.g. a
	//  register spill to memory followed by retrieval from spill memory
	//  followed by NUMERIC USEs should be typed as a continuous NUMERIC
	//  chain if there is no possibility of aliasing.

	// Preparatory step: For each indirect write, mark the block containing the
	//  write instruction. If there are no indirect writes in the function,
	//  leave all DEFs marked as unaliased and exit.

	for (list<SMPBasicBlock *>::iterator BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
		bool CurrBlockHasIndirectWrite = false;
		SMPBasicBlock *CurrBlock = (*BlockIter);
		for (vector<SMPInstr *>::iterator BlkInstIter = CurrBlock->GetFirstInst(); BlkInstIter != CurrBlock->GetLastInst(); ++BlkInstIter) {
			SMPInstr *CurrInst = (*BlkInstIter);
			if (CurrInst->HasIndirectMemoryWrite()) {
				this->HasIndirectWrites = true;
				// Until we get true aliasing analysis, any indirect write
				//  is classified as may-be-aliased.
				if (!CurrBlockHasIndirectWrite) { // first indirect write seen in this block
					++STARS_IndirectMemWriteBlockCount;
					CurrBlockHasIndirectWrite = true;
				}
				CurrBlock->SetMaybeAliased(true);
				if (CurrInst->DetectUnsafeMemWrite()) {
					this->HasUnsafeIndirectWrites = true;
					CurrInst->SetUnsafeMemWrite();
					++STARS_UnsafeIndirectMemWriteCount;
				}
				else {
					++STARS_SafeIndirectMemWriteCount;
					// BUGFIX: was "%p" with an integer address argument (undefined behavior
					//  for printf-style formatting); use the (uint64_t)/%llx convention used
					//  for addresses everywhere else in this file.
					SMP_msg("INFO: Found safe indirect write in AliasAnalysis at %llx\n", (uint64_t) CurrInst->GetAddr());
				}
			}
		} // end for all insts in block
	} // end for all blocks in function

	// No indirect writes anywhere => nothing can be aliased; we are done.
	if (!(this->HasIndirectWrites))
		return;

	// Algorithm: Maintain a set of live memory DEFs and their addresses. All LiveIn
	//  memory DEFs start out in the set, with the live-in pseudo-address. For each inst,
	//  if a memory DEF is redefined, replace the memory DEF in the tracking set with
	//  the current memory DEF. If current inst has an indirect write, set the MaybeAliased
	//  flag for each DEF in the tracking set and remove them from the set. Add the current
	//  memory DEF as the new (lone) DEF in the tracking set.
	// Do all of the above starting at the first RPO basic block and follow into successors,
	//  marking each block as processed on exit and not processing any block twice. When exiting
	//  a block, remove local memory DEFs from the tracking set.
	this->ResetProcessedBlocks();

	// Iterate through blocks starting with block 0.
	for (size_t BlockNum = 0; BlockNum < this->GetNumBlocks(); ++BlockNum) {
		this->ChainAliasHelper(BlockNum);
	}

	return;
} // end of SMPFunction::AliasAnalysis2()

// Recursive helper for AliasAnalysis2(): analyze one basic block.
// For a block containing at least one indirect memory write, maintain a tracking
//  set of live memory DEFs (operand -> defining address). When an indirect write
//  is reached, every DEF still live in the tracking set is marked maybe-aliased
//  (Phi DEFs propagate the marking to all Phi USE DEFs), the set is cleared, and
//  the indirect write's own memory DEF becomes the sole tracked DEF. A direct
//  memory re-DEF replaces the matching entry in the tracking set.
// NOTE: despite the name, this implementation does not recurse; AliasAnalysis2()
//  drives it over all blocks in order.
void SMPFunction::ChainAliasHelper(size_t BlockNum) {
	SMPBasicBlock *CurrBlock = this->GetBlockByNum(BlockNum);

	if (CurrBlock->HasIndirectMemWrite()) {
		STARSDefinitionSet MemTrackingSet;
		// The MemTrackingSet is only relevant for blocks that have indirect writes.
		//  It should hold live memory operands only.
		// Seed the tracking set with the LiveIn memory operands.
		STARS_ea_t BlockPseudoAddr = STARS_MakeBlockNumPseudoID(BlockNum);
		for (STARSOpndSetIter LiveInIter = CurrBlock->GetFirstLiveIn(); LiveInIter != CurrBlock->GetLastLiveIn(); ++LiveInIter) {
			STARSOpndTypePtr LiveInOp = (*LiveInIter);
			if (LiveInOp->IsMemOp()) {
				// If it is a PhiOp, we use the BlockPseudoAddr. Otherwise, it
				//  should only be LiveOut from one predecessor block, and we
				//  trace it to its DefAddr.
				if (CurrBlock->FindPhi(LiveInOp) != CurrBlock->GetLastPhi()) {
					pair<STARSOpndTypePtr, STARS_ea_t> InsertPair(LiveInOp, BlockPseudoAddr);
					MemTrackingSet.insert(InsertPair);
				}
				else {
					bool AlreadyMarkedMaybeAliased = false;
					STARS_ea_t LiveInDefAddr = CurrBlock->TraceLiveInOpToDefAddr(LiveInOp, AlreadyMarkedMaybeAliased);
					if (STARS_BADADDR == LiveInDefAddr) {
						SMP_msg("FATAL ERROR: ChainAliasHelper bad LiveInDefAddr in block %zu\n", BlockNum);
						SMP_msg("Operand: ");
						PrintOperand(LiveInOp);
						SMP_msg("\n");
						this->Dump();
						assert(STARS_BADADDR != LiveInDefAddr); // kablooey!
					}
					// A DEF already marked maybe-aliased needs no further tracking.
					if (!AlreadyMarkedMaybeAliased) {
						pair<STARSOpndTypePtr, STARS_ea_t> InsertPair(LiveInOp, LiveInDefAddr);
						MemTrackingSet.insert(InsertPair);
					}
				}
			}
		} // end for all LiveIn operands

		// Iterate through instructions for blocks with indirect memory writes. 
		if (!MemTrackingSet.empty()) {
			for (vector<SMPInstr *>::iterator BlkInstIter = CurrBlock->GetFirstInst(); BlkInstIter != CurrBlock->GetLastInst(); ++BlkInstIter) {
				SMPInstr *CurrInst = (*BlkInstIter);
				STARS_ea_t InstAddr = CurrInst->GetAddr();
				if (CurrInst->HasIndirectMemoryWrite()) {
					// Mark the DEF for each operand in the MemTrackingSet as MaybeAliased,
					//  then remove it from the tracking set.
					STARSDefinitionSet::iterator TrackingIter = MemTrackingSet.begin();
					while (TrackingIter != MemTrackingSet.end()) {
						STARSOpndTypePtr LocalOp = (*TrackingIter).first;
						STARS_ea_t DefAddr = (*TrackingIter).second;
						if (STARS_IsBlockNumPseudoID(DefAddr)) { // Phi DEF
							// We need to mark the DEFs for each Phi USE as MaybeAliased.
							size_t DefBlockNum = STARS_GetBlockNumFromPseudoID(DefAddr);
							SMPBasicBlock *DefBlock = this->GetBlockByNum(DefBlockNum);
							assert(nullptr != DefBlock);
							this->ResetSCCPVisitedBlocks(); // overloaded use for MarkAllPhiUsesAliased()
							DefBlock->MarkAllPhiUsesAliased(LocalOp);
						}
						else {
							SMPInstr *DefInst = this->GetInstFromAddr(DefAddr);
							// Make sure that the local operand is USEd after the indirect write,
							//  else its live range does not overlap and there is no alias.
							STARSDefUseIter DefIter = DefInst->FindDef(LocalOp);
							assert(DefIter != DefInst->GetLastDef());
							bool DeadMemOp = CurrBlock->IsMemOpDead(LocalOp, BlkInstIter, DefIter->GetSSANum());
							if (!DeadMemOp) {
								// Return value (the updated DEF iterator) is not needed here.
								(void) DefInst->SetDefIndWrite(LocalOp, true);
							}
						}
						++TrackingIter;
					} // end while TrackingIter is not at end of MemTrackingSet
					MemTrackingSet.clear();

					// Now, initialize the tracking set with just this latest memory operand.
					STARSOpndTypePtr DefOp = CurrInst->GetMemDef();
					STARSDefinition InsertPair(DefOp, InstAddr);
					pair<STARSDefinitionSet::iterator, bool> InsertResult = MemTrackingSet.insert(InsertPair);
					assert(InsertResult.second);
				}
				else if (CurrInst->HasDestMemoryOperand()) {
					// Replace re-DEFed operands in MemTrackingSet.
					// It is conceivable that two different DEFs of a memory operand, at
					//  two different addresses, are being re-DEFed, so we scan the whole
					//  set. We erase all matches first and insert the replacement entry
					//  once afterwards; erasing and inserting mid-scan (as the old code
					//  did) can reposition the iterator past unvisited entries and
					//  silently skip a second matching DEF.
					STARSOpndTypePtr DefOp = CurrInst->GetMemDef();
					bool FoundRedef = false;
					STARSDefinitionSet::iterator TrackingIter = MemTrackingSet.begin();
					while (TrackingIter != MemTrackingSet.end()) {
						STARSOpndTypePtr LocalOp = (*TrackingIter).first;
						if (IsEqOpIgnoreBitwidth(DefOp, LocalOp)) {
							FoundRedef = true;
							TrackingIter = MemTrackingSet.erase(TrackingIter);
						}
						else {
							++TrackingIter;
						}
					} // end while TrackingIter is not at end of MemTrackingSet
					if (FoundRedef) {
						// The current instruction's DEF is now the live DEF for this operand.
						STARSDefinition InsertPair(DefOp, InstAddr);
						MemTrackingSet.insert(InsertPair);
					}
					// NOTE(review): the original comment promised "or add new operand",
					//  but operands with no match have never been added here; behavior
					//  is preserved pending confirmation of the intended semantics.
				}
			} // end for all insts in block
		}
	}

	return;
} // end of SMPFunction::ChainAliasHelper()

// Find memory writes (DEFs) with possible aliases.
// Pass 1: mark every block containing an indirect memory write as maybe-aliased,
//  record whether the function has (unsafe) indirect writes, and bump the
//  program-wide safe/unsafe indirect write counters.
// Pass 2 (only if indirect writes exist): for each instruction with a memory DEF,
//  walk the control flow from the DEF and mark the DEF maybe-aliased if an
//  indirect write overlaps its def-use chain (see FindPossibleChainAlias()).
void SMPFunction::AliasAnalysis(void) {
	// First task: Mark which memory DEFs MIGHT be aliased because an
	//  indirect memory write occurs somewhere in the DEF-USE chain.
	//  Memory DEF-USE chains with no possible aliasing can be subjected
	//  to type inference and type-based optimizing annotations, e.g. a
	//  register spill to memory followed by retrieval from spill memory
	//  followed by NUMERIC USEs should be typed as a continuous NUMERIC
	//  chain if there is no possibility of aliasing.

	// Preparatory step: For each indirect write, mark all def-use chains
	//  (maintained at the basic block level) that include the indirect
	//  write instruction. If there are no indirect writes in the function,
	//  leave all DEFs marked as unaliased and exit.

	for (list<SMPBasicBlock *>::iterator BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
		SMPBasicBlock *CurrBlock = (*BlockIter);
		for (vector<SMPInstr *>::iterator BlkInstIter = CurrBlock->GetFirstInst(); BlkInstIter != CurrBlock->GetLastInst(); ++BlkInstIter) {
			SMPInstr *CurrInst = (*BlkInstIter);
			if (CurrInst->HasIndirectMemoryWrite()) {
				this->HasIndirectWrites = true;
				// Until we get true aliasing analysis, any indirect write
				//  is classified as may-be-aliased.
				CurrBlock->SetMaybeAliased(true);
				if (CurrInst->DetectUnsafeMemWrite()) {
					this->HasUnsafeIndirectWrites = true;
					CurrInst->SetUnsafeMemWrite();
					++STARS_UnsafeIndirectMemWriteCount;
				}
				else {
					++STARS_SafeIndirectMemWriteCount;
					// Use %llx + cast, consistent with address printing elsewhere in
					//  this file; passing an integer address to %p is undefined behavior.
					SMP_msg("INFO: Found safe indirect write in AliasAnalysis at %llx\n",
						(unsigned long long) CurrInst->GetAddr());
				}
			}
		} // end for all insts in block
	} // end for all blocks in function

	// No indirect writes => no DEF can be aliased; leave all DEFs unaliased.
	if (!(this->HasIndirectWrites))
		return;

	// Step one: Find only the memory DEFs to start with.
	bool FoundIndWrite = false;
	for (list<SMPInstr *>::iterator InstIter = this->Instrs.begin(); InstIter != this->Instrs.end(); ++InstIter) {
		SMPInstr *CurrInst = (*InstIter);
		STARS_ea_t InstAddr = CurrInst->GetAddr();
		if (CurrInst->HasDestMemoryOperand()) {
			// Starting with the DEF instruction, traverse the control flow
			//  until we run into (A) the re-definition of the operand, including
			//  a re-definition of any of its addressing registers, or (B) an
			//  indirect write. Return false if condition A terminates the
			//  search, and true if condition B terminates the search.
			this->ResetProcessedBlocks();
			STARSOpndTypePtr MemDefOp = CurrInst->MDGetMemDefOp();
			assert(! MemDefOp->IsVoidOp());
			set<DefOrUse, LessDefUse>::iterator CurrMemDef = CurrInst->FindDef(MemDefOp);
			assert(CurrMemDef != CurrInst->GetLastDef());
			int SSANum = CurrMemDef->GetSSANum();
			FoundIndWrite = this->FindPossibleChainAlias(CurrInst, MemDefOp, SSANum);
			if (FoundIndWrite) {
				// Mark the DEF as aliased.
				CurrMemDef = CurrInst->SetDefIndWrite(CurrMemDef->GetOp(), true);
			}
		} // end if inst has dest memory operand
		if (global_STARS_program->ShouldSTARSShadowFuncPtrs() || global_STARS_program->ShouldSTARSMaximizeCFGImprovement()) {
			// NOTE(review): intentionally empty. A disabled experiment previously
			//  analyzed memory DEFs in marker instructions here; the guard calls are
			//  retained unchanged in case the getters are not side-effect free.
		}
	} // end for all instructions

	return;
} // end of SMPFunction::AliasAnalysis()

// Does the DefOp DEF-USE chain have an indirect mem write starting at CurrInst?
// Returns true if the chain begun by the DEF at CurrInst overlaps an indirect
//  memory write (so the DEF may be aliased), false otherwise.
// Params: CurrInst - instruction holding the memory DEF; DefOp - the DEF operand;
//  SSANum - SSA number of the DEF (currently unused here; kept for the caller's
//  interface).
bool SMPFunction::FindPossibleChainAlias(SMPInstr *CurrInst, const STARSOpndTypePtr &DefOp, int SSANum) {
	// Starting with the DEF instruction, traverse the control flow
	//  until we run into (A) the re-definition of the operand, including
	//  a re-definition of any of its addressing registers, or (B) an
	//  indirect write. Return false if condition A terminates the
	//  search, and true if condition B terminates the search.
	SMPBasicBlock *CurrBlock = CurrInst->GetBlock();
	if (!(CurrBlock->IsProcessed())) {
		CurrBlock->SetProcessed(true);
	}
	else
		return false; // block already processed

	// Proceed by cases:
	STARS_ea_t DefAddr = CurrInst->GetAddr();
	vector<SMPInstr *>::iterator InstIter = CurrBlock->GetInstIterFromAddr(DefAddr);
	bool IndWriteFound = false, ReDef = false;
	++InstIter; // start scanning at the instruction after the DEF
	// Case 1: Local name. Return the IndWrite flag for the local Def-Use
	//  chain begun by CurrInst.
	bool UseAfterIndirectWrite = CurrBlock->HasUseAfterIndWrite(DefOp, InstIter, IndWriteFound, ReDef);
	bool LiveOutFlag = CurrBlock->IsLiveOut(DefOp);
	if (CurrBlock->IsLocalName(DefOp)) {
		return (UseAfterIndirectWrite);
	}

	// Case 2: Global name.
	if (UseAfterIndirectWrite) {
		return true;
	}
	else if (IndWriteFound) {
		// We found an indirect write, but no USE of DefOp after it.
		//  If DefOp is LiveOut, then there is a USE of DefOp in a
		//  successor block, so DefOp can be aliased.
		return (LiveOutFlag && !ReDef);
	}

	// Final case: We have passed all previous checks, so we must have a memory
	//  operand that reaches the end of the block without encountering an
	//  IndWrite and is LiveOut. Its may-alias status will be determined by
	//  following the control flow graph for all successor blocks and examining
	//  the def-use chains in those blocks.
	// NOTE: the loop tests the successor iterator BEFORE dereferencing it.
	//  The previous do-while dereferenced first, which was undefined behavior
	//  for a block with no successors.
	bool FoundAliasedWrite = false;
	if (LiveOutFlag && !ReDef) {
		list<SMPBasicBlock *>::iterator SuccBlock = CurrBlock->GetFirstSucc();
		while (!FoundAliasedWrite && (SuccBlock != CurrBlock->GetLastSucc())) {
			if ((*SuccBlock)->IsLiveIn(DefOp)) {
				FoundAliasedWrite = this->FindChainAliasHelper(SuccBlock, DefOp);
			}
			++SuccBlock;
		}
	}

	return FoundAliasedWrite;
} // end of SMPFunction::FindPossibleChainAlias()

// Recursive helper for global DU-chains that traverse the CFG.
// Determines whether the DU chain for DefOp, entering the block at BlockIter,
//  overlaps a maybe-aliased (indirect) write. Recurses into successors when the
//  chain passes through the block untouched. Each block is visited at most once
//  per top-level query (guarded by the Processed flag, reset by the caller).
bool SMPFunction::FindChainAliasHelper(list<SMPBasicBlock *>::iterator BlockIter, const STARSOpndTypePtr &DefOp) {
	bool DebugFlag = false;
	SMPBasicBlock *CurrBlock = (*BlockIter);
	if (0 == strcmp("mem2chunk_check", this->GetFuncName())) {
		// Next line is just a good place to set a break point.
		DebugFlag = true;
	}

	if (!(CurrBlock->IsProcessed())) {
		CurrBlock->SetProcessed(true);
	}
	else
		return false; // block already processed

	// The LVA sets can be used to decide whether it is possible that
	//  the incoming DU chain overlaps a may-alias write. We can express
	//  the decision making in a truth table:
	//
	//  Case #    LiveIn?   Killed?   AliasedWrite in block?  Action to take
	//  -------   -------   -------   ----------------------  --------------
	//    1          N          N                N             return false
	//    2          N          N                Y             return false
	//    3          N          Y                N             return false
	//    4          N          Y                Y             return false
	//    5          Y          N                N             recurse into successors
	//    6          Y          N                Y             return true
	//    7          Y          Y                N             return false
	//    8          Y          Y                Y             check location of aliased write
	//
	// In the last case, if there is an aliased write before the
	// incoming DEF is killed and after it is used, then the
	// incoming DU chain overlaps an aliased write, otherwise
	// it does not.

	// If not LiveIn, incoming DU chain does not run through this block
	//  at all, so return false.
	if (!(CurrBlock->IsLiveIn(DefOp)))
		return false;  // cases 1-4

	bool killed = CurrBlock->IsVarKill(DefOp);
	bool BlockHasAliasedWrite = CurrBlock->MaybeAliasedWrite();

	if (BlockHasAliasedWrite) {
		// If DefOp is LiveIn and is not killed, then any aliased
		//  write in the block overlaps the incoming DU chain.
		if (!killed) {
			return true;  // case 6
		}
		// If DefOp is LiveIn and is killed, then the location
		//  of the aliased write is the determining factor.
		else { // case 8; depends on finding indirect write, then USE, before re-DEF.
			vector<SMPInstr *>::iterator InstIter = CurrBlock->GetFirstInst();
			// Initialize the out-params, consistent with the call site in
			//  FindPossibleChainAlias(), in case the callee sets them only
			//  conditionally.
			bool IndWriteFound = false, ReDef = false;
			return CurrBlock->HasUseAfterIndWrite(DefOp, InstIter, IndWriteFound, ReDef);
		}
	}
	else {
		// If killed, no aliased write, then cannot overlap an aliased write.
		if (killed)
			return false; // case 7
		else {
			// Case 5: Need to recurse into all successors, because we passed
			//  through the block without seeing an aliased write and without
			//  killing the DefOp.
			list<SMPBasicBlock *>::iterator SuccBlock = CurrBlock->GetFirstSucc();
			bool FoundAliasedWrite = false;
			while (!FoundAliasedWrite && (SuccBlock != CurrBlock->GetLastSucc())) {
				FoundAliasedWrite = this->FindChainAliasHelper(SuccBlock, DefOp);
				++SuccBlock;
			}

			if (DebugFlag) {
				SMP_msg("FindChainAliasHelper is returning %d\n", FoundAliasedWrite);
			}
			return FoundAliasedWrite;
		}
	}
	assert(false); // statement should be unreachable
	return false;
} // end of SMPFunction::FindChainAliasHelper()

// Remove a basic block and its instructions from this function.
// If IBTarget is true, the block could still be reached via an indirect branch,
//  so we emit an IBT (unknown code xref) annotation plus INSTR BELONGTO
//  annotations before removal. The block is unlinked from its CFG neighbors,
//  transferred to the program-wide unreachable-code container (not destroyed),
//  and its instructions are erased from the function's instruction list.
// BlockIter is advanced past the erased block for the caller's loop.
void SMPFunction::RemoveBlock(SMPBasicBlock *CurrBlock, list<SMPBasicBlock *>::iterator &BlockIter, bool IBTarget) {
	if (IBTarget) { 
		// Block could be IBTarget and thus actually be reachable. Cover our bases by emitting an IBT annotation.
		SMPInstr *FirstInst = (*(CurrBlock->GetFirstInst()));
		global_STARS_program->PrintUnknownCodeXref(CurrBlock->GetFirstAddr(), FirstInst->GetSize(), ZST_UNREACHABLEBLOCK);

		// It cannot hurt to add INSTR BELONGTO annotations to the main annotations file.
		STARS_ea_t FuncAddr = this->GetFirstFuncAddr();
		FILE *AnnotFile = global_STARS_program->GetAnnotFile();
		assert(nullptr != AnnotFile);
		for (vector<SMPInstr *>::iterator InstIter = CurrBlock->GetFirstInst(); InstIter != CurrBlock->GetLastInst(); ++InstIter) {
			SMPInstr *CurrInst = (*InstIter);
			STARS_ea_t InstAddr = CurrInst->GetAddr();
			SMP_fprintf(AnnotFile, "%18llx %6zu INSTR BELONGTO %llx \n",
				(unsigned long long) InstAddr, CurrInst->GetSize(), (unsigned long long) FuncAddr);
		}
	}

	// Remove this block from the predecessors list of its successors.
	list<SMPBasicBlock *>::iterator SuccIter;
	STARS_ea_t TempAddr = CurrBlock->GetFirstAddr();
	for (SuccIter = CurrBlock->GetFirstSucc(); SuccIter != CurrBlock->GetLastSucc(); ++SuccIter) {
		(*SuccIter)->ErasePred(TempAddr);
	}

	// Remove this block from the successors list of its predecessors.
	list<SMPBasicBlock *>::iterator PredIter;
	for (PredIter = CurrBlock->GetFirstPred(); PredIter != CurrBlock->GetLastPred(); ++PredIter) {
		(*PredIter)->EraseSucc(TempAddr);
	}

	// Transfer the unreachable block to the program-wide container of unreachable code.
	this->GetProg()->AddUnreachableBlock(CurrBlock);

	// Remove the unreachable instructions from the function inst list,
	//  spanning from the block's first to its last real instruction.
	vector<SMPInstr *>::iterator InstIter = CurrBlock->GetFirstInst();
	STARS_ea_t FirstBadAddr = (*InstIter)->GetAddr();
	InstIter = --(CurrBlock->GetLastInst()); // get last real instruction
	STARS_ea_t LastBadAddr = (*InstIter)->GetAddr();
	this->EraseInstRange(FirstBadAddr, LastBadAddr);

	// Remove the block from the blocks list.
	BlockIter = this->Blocks.erase(BlockIter);
	this->BlockCount -= 1;
	return;
} // end of SMPFunction::RemoveBlock()

// Func is empty, so add all blocks that call it to Program->BlocksPendingRemoval.
// For each recorded call site still residing in a live function, look up the
//  calling block and queue it for removal; call sites whose function cannot be
//  found are reported but otherwise skipped.
void SMPFunction::RemoveCallingBlocks(void) const {
	for (set<STARS_ea_t>::iterator CallSiteIter = this->AllCallSites.begin(); CallSiteIter != this->AllCallSites.end(); ++CallSiteIter) {
		STARS_ea_t CallInstAddr = (*CallSiteIter);
		STARS_ea_t CallingFuncAddr = STARS_BADADDR;
		// Skip call sites whose instruction has already been removed from its function.
		if (this->GetProg()->IsInstAddrStillInFunction(CallInstAddr, CallingFuncAddr)) {
			SMPFunction *CallingFunc = this->GetProg()->FindFunction(CallingFuncAddr);
			if (nullptr == CallingFunc) {
				SMP_msg("ERROR: Cannot find function with start addr %llx and call inst at %llx\n", 
					(unsigned long long) CallingFuncAddr, (unsigned long long) CallInstAddr);
			}
			else {
				SMPBasicBlock *CallingBlock = CallingFunc->GetBlockFromInstAddr(CallInstAddr);
				assert(nullptr != CallingBlock); // was NULL; nullptr matches file convention
				this->GetProg()->AddBlockToRemovalList(CallingBlock);
			}
		}
	}
	return;
} // end of SMPFunction::RemoveCallingBlocks()

// Link basic blocks to their predecessors and successors.
void SMPFunction::SetLinks(void) {
	list<SMPBasicBlock *>::iterator BlockIter;
	SMPBasicBlock *CurrBlock;
	list<SMPBasicBlock *> UnresolvedBranchWorkList;
	STARS_ea_t InstAddr;
	bool DebugFlag = (0x28f3a0 == this->GetFirstFuncAddr());
#if SMP_DEBUG_DATAFLOW_VERBOSE
	SMP_msg("SetLinks called for %s\n", this->GetFuncName());
#endif

#if SMP_DEBUG_DATAFLOW_VERBOSE
	SMP_msg("SetLinks finished mapping: %s\n", this->GetFuncName());
#endif
	// Set successors of each basic block, also setting up the predecessors in the
	//  process.
	for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
		CurrBlock = (*BlockIter);
		vector<SMPInstr *>::iterator InstIter = (--(CurrBlock->GetLastInst()));
		SMPInstr *CurrInst = (*InstIter);
		InstAddr = CurrInst->GetAddr();
		STARS_ea_t BlockAddr = CurrBlock->GetFirstAddr();
		if (DebugFlag) {
			SMP_msg("DEBUG: SetLinks processing block from %llx to %llx\n",
				(unsigned long long) BlockAddr, (unsigned long long) InstAddr);
		}
		bool CondTailCall = false;
		if (CurrBlock->HasReturn()) {
			if (!(CurrInst->IsCondTailCall())) {
				// We either have a return instruction or an unconditional
				//  tail call instruction. We don't want to link to the
				//  tail call target, and there is no link for a return
				if (DebugFlag) {
					SMP_msg("DEBUG: SetLinks not linking successors for return block\n");
				}
				continue;
			}
			else {
				// We have a conditional tail call. We don't want to
				//  link to the tail call target, but we do want fall
				//  through to the next instruction.
				CondTailCall = true;
			}
		}
		
		// Last instruction in block; set successors
		SMPitype FlowType = CurrInst->GetDataFlowType();
		if (HALT == FlowType) {
			if (DebugFlag) {
				SMP_msg("DEBUG: SetLinks not linking successors for HALT block\n");
			}
			continue;
		}
		bool CallFlag = (CALL == FlowType);
		bool IndirJumpFlag = (INDIR_JUMP == FlowType);
		bool IndirCallFlag = (INDIR_CALL == FlowType);
		bool SysCallFlag = (IndirCallFlag && CurrInst->MDIsSystemCall());
		// NOTE: Due to phase re-ordering, we cannot yet identify tail calls,
		//  so CondTailCall and TailCallFlag will always be false, which is harmless.
		//  SMPInstr::SetTailCall() will do a little cleanup later.
		bool TailCallFlag = CondTailCall && CurrInst->IsCondTailCall();
		bool LinkedToTarget = false;
		bool FixedCallFlag = (CallFlag && CurrInst->IsFixedCallJump());
		list<STARS_InstructionID_t> SuccList;
		global_STARS_program->GetBlockSuccessorTargets((CallFlag || IndirCallFlag || TailCallFlag), CurrInst->GetInstID(), CurrInst->GetSize(), SuccList);
		if (FixedCallFlag) { // Need to find successor from fixed call push
			assert(SuccList.size() == 1); // should have one target successor, no other successors
			STARS_ea_t FallThroughAddr = CurrBlock->FindFixedCallFallThrough();
			map<STARS_ea_t, SMPBasicBlock *>::iterator MapEntry = this->InstBlockMap.find(FallThroughAddr);
			if (MapEntry == this->InstBlockMap.end()) {
				SMP_msg("WARNING: FixedCallPush at %llx not linked to FallThroughAddr at %llx\n",
					(unsigned long long) InstAddr, (unsigned long long) FallThroughAddr);
			}
			else {
				SMPBasicBlock *Target = MapEntry->second;
				// Make target block a successor of current block.
				CurrBlock->LinkToSucc(Target);
				// Make current block a predecessor of target block.
				Target->LinkToPred(CurrBlock);
				LinkedToTarget = true;
				if (DebugFlag) {
					SMP_msg("DEBUG: SetLinks linking to successor block at %llx\n",
						(unsigned long long) FallThroughAddr);
				}
			}
			if (!LinkedToTarget) {
				SMP_msg("ERROR: SetLinks unable to link to successor of fixed-call block ending at %llx.\n",
					(unsigned long long) InstAddr);
			}
		}
		else {
			for (list<STARS_InstructionID_t>::iterator IDIter = SuccList.begin(); IDIter != SuccList.end(); ++IDIter) {
				STARS_ea_t TargetAddr = IDIter->GetIDWithinFile();

				map<STARS_ea_t, SMPBasicBlock *>::iterator MapEntry = this->InstBlockMap.find(TargetAddr);
				if (MapEntry == this->InstBlockMap.end()) {
					; // do nothing; probably a tail call (not yet identified)
#if 1
					if (DebugFlag) {
						SMP_msg("WARNING: TargetAddr %x not found in map for %s\n", TargetAddr,
							this->GetFuncName());
						SMP_msg(" Referenced from %s\n", CurrInst->GetDisasm());
					}
#endif
				}
				else {
					SMPBasicBlock *Target = MapEntry->second;
					// Make target block a successor of current block.
					CurrBlock->LinkToSucc(Target);
					// Make current block a predecessor of target block.
					Target->LinkToPred(CurrBlock);
					LinkedToTarget = true;
					if (DebugFlag) {
						SMP_msg("DEBUG: SetLinks linking to successor block at %llx\n",
							(unsigned long long) TargetAddr);
					}
#if SMP_USE_SWITCH_TABLE_INFO
					if (IndirJumpFlag) {
						pair<set<STARS_ea_t>::iterator, bool> InsertResult = this->JumpTableTargets.insert(TargetAddr);
						if (!InsertResult.second) {
							SMP_msg("WARNING: code address %llx found in multiple jump tables?\n", (uint64_t) TargetAddr);
						}
#if SMP_DEBUG_SWITCH_TABLE_INFO
						SMP_msg("Switch table link: jump at %x target at %x\n",
							CurrInst->GetAddr(), TargetAddr);
#else
						;
#endif
					}
#endif
				}
			} // end for all IDs in SuccList
		} // end if (FixedCallFlag) ... else [loop through SuccList]

		if (IndirJumpFlag && (!LinkedToTarget)) {
			this->UnresolvedIndirectJumps = true;
			++UnresolvedIndirectJumpCount;
			UnresolvedBranchWorkList.push_back(CurrBlock);
			SMP_msg("WARNING: Unresolved indirect jump at %llx FuncAddr: %llx\n",
				(unsigned long long) CurrInst->GetAddr(), (unsigned long long) this->GetFirstFuncAddr());
		}
		else if (IndirCallFlag && (!LinkedToTarget) && (!SysCallFlag)) {
			this->SetHasUnresolvedIndirectCalls();
			SMP_msg("WARNING: Unresolved indirect call at %llx FuncAddr: %llx\n",
				(unsigned long long) CurrInst->GetAddr(), (unsigned long long) this->GetFirstFuncAddr());
		}
		else if (IndirJumpFlag && LinkedToTarget) {
			++ResolvedIndirectJumpCount;
		}
	} // end for all blocks

	// Mark all blocks that can be reached from the entry block, so we can find the unreachable ones.
	this->ResetProcessedBlocks();
	this->Blocks.front()->DepthFirstMark(false);
	// We have two cases: (1) Unresolved indirect branches could be targeting the unmarked blocks, making
	//  these blocks reachable, in which case we should link the unresolved branches to the unmarked blocks;
	//  or (2) there are no unresolved branches, in which case the unmarked blocks are unreachable within
	//  the function. They might be reachable from outside the function using exception handling jumps, but
	//  that still would not allow us to link them into the CFG of this function properly, so in any case we
	//  are deleting those unreachable blocks and not emitting annotations for them.
	// NOTE: An odd new gcc recursion optimization uses indirect calls within the function, so
	//  they can behave like indirect jumps. However, we don't want to link unresolved calls to unmarked blocks
	//  at this time.
	bool HellNodeCase = (!UnresolvedBranchWorkList.empty() && (this->HasUnresolvedIndirectCalls() || this->HasUnresolvedIndirectJumps()));
	bool AddedMissingLinks = false;
	bool changed;
	bool FirstIteration = true;
	do {
		changed = false;
		list<SMPBasicBlock *>::iterator BlockIter = this->Blocks.begin();
		while (BlockIter != this->Blocks.end()) {
			CurrBlock = (*BlockIter);
			STARS_ea_t BlockAddr = CurrBlock->GetFirstAddr();
			if (CurrBlock->IsProcessed()) {
				if (CurrBlock->IsUnreachableBlock()) { // has call 0 instruction, must be unreachable
					this->GetProg()->AddBlockToRemovalList(CurrBlock);
				}
				++BlockIter;
			}
			else {
				// Block cannot be reached from entry node, even after we have added links
				//  on previous loop iterations.
				bool CatchBlockFound = global_STARS_program->IsCatchBlockAddr(BlockAddr);
				if (DebugFlag) {
					SMP_msg("DEBUG: SetLinks found unreachable block at %llx Catch: %d\n", 
						(uint64_t) BlockAddr, CatchBlockFound);
				}
				if (!HellNodeCase) {
					if (CurrBlock->AllNops())
						SMP_msg("INFO: Removing all nops block at %llx\n", (uint64_t) BlockAddr);
					else {
						CurrBlock->SetUnreachableBlock(true);
						SMP_msg("INFO: Function is Removing unreachable block at %llx\n", (uint64_t) BlockAddr);
					}

					bool MightBeIndirectTarget = true;
					this->RemoveBlock(CurrBlock, BlockIter, MightBeIndirectTarget);

#if 0  // Exception handling code requires something more delicate than this. Later checks for stack adjustment etc. can look at these blocks.
					// Finally, call destructors on the block and insts removed.
					InstIter = CurrBlock->GetFirstInst();
					while (InstIter != CurrBlock->GetLastInst()) {
						SMPInstr *DeadInst = (*InstIter);
						++InstIter;
						if (NULL != DeadInst) delete DeadInst;
					}
					delete CurrBlock;
#endif
				}
				else { // HellNodeCase
					if (!CatchBlockFound) {
						if (DebugFlag) {
							SMP_msg("DEBUG: SetLinks adding hell node links for unreachable block at %llx\n",
								(uint64_t)BlockAddr);
						}
						// Block must be reachable only through an unresolved indirect branch.
						// Make each unresolved indirect branch link to the block so it is reachable.
						AddedMissingLinks = true;
						for (list<SMPBasicBlock *>::iterator WorkIter = UnresolvedBranchWorkList.begin(); WorkIter != UnresolvedBranchWorkList.end(); ++WorkIter) {
							SMPBasicBlock *WorkBlock = (*WorkIter);
							WorkBlock->LinkToSucc(CurrBlock);
						}
					}
					else {
						SMP_msg("INFO: SetLinks not adding link because %llx is a catch block addr.\n", (uint64_t)BlockAddr);
					}
					// Mark CurrBlock as now being reachable, along with the blocks it dominates.
					CurrBlock->DepthFirstMark(true); // yes, even if it is a catch block
					++BlockIter;
					// Block could be IBTarget and thus actually be reachable. Cover our bases by emitting an IBT annotation.
					SMPInstr *FirstInst = (*(CurrBlock->GetFirstInst()));
					global_STARS_program->PrintUnknownCodeXref(BlockAddr, FirstInst->GetSize(), ZST_UNREACHABLEBLOCK);
				}
				changed = true;
			} // end if (processed) ... else ...
		} // end loop through blocks
		FirstIteration = false;
	} while (changed);
	if (HellNodeCase && (!AddedMissingLinks)) {
		SMP_msg("SERIOUS WARNING: SetLinks: Function at %llx has unresolved indirect branches but no unreachable blocks.\n",
			(unsigned long long) this->GetFirstFuncAddr());
	}

#if 0
	// If we have any blocks that are all no-ops and have no predecessors, remove those
	//  blocks. They are dead and make the CFG no longer a lattice. Any blocks that have
	//  no predecessors but are not all no-ops should also be removed with a different
	//  log message.
	// NOTE: Prior to construction of hell nodes in functions with unresolved indirect jumps,
	//  we cannot conclude that a block with no predecessors is unreachable. Also, the block
	//  order might be such that removal of a block makes an already processed block
	//  unreachable, so we have to iterate until there are no more changes.
	bool NoPredecessors;
	bool OnlyPredIsItself;
	list<SMPBasicBlock *>::iterator CurrPred;
#if SMP_USE_SWITCH_TABLE_INFO
	if (!(this->HasUnresolvedIndirectJumps() || this->HasUnresolvedIndirectCalls())) {
#else
	if (!(this->HasIndirectJumps() || this->HasIndirectCalls())) {
#endif
		bool changed;
		do {
			changed = false;
			BlockIter = this->Blocks.begin();
			++BlockIter; // don't delete the top block, no matter what.
			while (BlockIter != this->Blocks.end()) {
				CurrBlock = (*BlockIter);
				OnlyPredIsItself = false;
				CurrPred = CurrBlock->GetFirstPred();
				NoPredecessors = (CurrPred == CurrBlock->GetLastPred());
				if (!NoPredecessors) {
					if ((*CurrPred)->GetFirstAddr() == CurrBlock->GetFirstAddr()) { // self-recursion
						++CurrPred; // any more preds besides itself?
						OnlyPredIsItself = (CurrPred == CurrBlock->GetLastPred());
							// Only predecessor was the self-recursion if no more preds
					}
				}
				if (NoPredecessors || OnlyPredIsItself) {
					if (CurrBlock->AllNops())
						SMP_msg("Removing all nops block at %x\n", CurrBlock->GetFirstAddr());
					else
						SMP_msg("Removing block with no predecessors at %x\n", CurrBlock->GetFirstAddr());
					// Remove this block from the predecessors list of its successors.
					list<SMPBasicBlock *>::iterator SuccIter;
					STARS_ea_t TempAddr = CurrBlock->GetFirstAddr();
					for (SuccIter = CurrBlock->GetFirstSucc(); SuccIter != CurrBlock->GetLastSucc(); ++SuccIter) {
						(*SuccIter)->ErasePred(TempAddr);
					}
					// Transfer the unreachable block to the program-wide container of unreachable code.
					this->GetProg()->AddUnreachableBlock(CurrBlock);
					// Remove the unreachable instructions from the function inst list.
					vector<SMPInstr *>::iterator InstIter;
					InstIter = CurrBlock->GetFirstInst();
					STARS_ea_t FirstBadAddr = (*InstIter)->GetAddr();
					InstIter = CurrBlock->GetLastInst();
					--InstIter; // get last real instruction
					STARS_ea_t LastBadAddr = (*InstIter)->GetAddr();
					this->EraseInstRange(FirstBadAddr, LastBadAddr);

					// Finally, remove the block from the blocks list.
					BlockIter = this->Blocks.erase(BlockIter);
					this->BlockCount -= 1;
					changed = true;
				}
				else {
					++BlockIter;
				}
			} // end while all blocks after the first one
		} while (changed);
	} // end if not unresolved indirect jumps or indirect calls
	else if (this->HasUnresolvedIndirectJumps()) {
		// Make each unresolved indirect branch have each block with no predecessor as a target,
		//  so that the resulting CFG has a proper structure.
		BlockIter = this->Blocks.begin();
		++BlockIter; // The top block is expected to have no predecessors, which is not a CFG problem.
		bool AddedMissingLinks = false;
		while (BlockIter != this->Blocks.end()) {
			CurrBlock = (*BlockIter);
			OnlyPredIsItself = false;
			CurrPred = CurrBlock->GetFirstPred();
			NoPredecessors = (CurrPred == CurrBlock->GetLastPred());
			if (!NoPredecessors) {
				if ((*CurrPred)->GetFirstAddr() == CurrBlock->GetFirstAddr()) { // self-recursion
					++CurrPred; // any more preds besides itself?
					OnlyPredIsItself = (CurrPred == CurrBlock->GetLastPred());
						// Only predecessor was the self-recursion if no more preds
				}
			}
			if (NoPredecessors || OnlyPredIsItself) {
				// Block must be reachable only through an unresolved indirect branch.
				// Make each unresolved indirect branch link to the block so it is reachable.
				list<SMPBasicBlock *>::iterator WorkIter;
				AddedMissingLinks = true;
				for (WorkIter = UnresolvedBranchWorkList.begin(); WorkIter != UnresolvedBranchWorkList.end(); ++ WorkIter) {
					SMPBasicBlock *WorkBlock = (*WorkIter);
					WorkBlock->LinkToSucc(CurrBlock);
				}
			}
			++BlockIter;
		} // end for all blocks
		if (!AddedMissingLinks) {
			SMP_msg("SERIOUS WARNING: Function at %x has unresolved indirect branches but no unreachable blocks.\n", this->GetFirstFuncAddr());
		}
	}
#endif

	return;
} // end of SMPFunction::SetLinks()

// Number all basic blocks in reverse postorder (RPO) and set RPOBlocks vector to
//  access them.
// Worklist algorithm: the entry block gets number 0; a worklist block is numbered
//  once all of its predecessors are numbered. When loops leave no ready block,
//  the worklist entry with the lowest start address is numbered to break the cycle.
//  Any blocks still unnumbered at the end (reachable only via indirect control
//  flow, or apparently dead) are numbered last so that RPOBlocks covers every block.
void SMPFunction::RPONumberBlocks(void) {
#if SMP_DEBUG_DATAFLOW
	bool DebugFlag = false;
	DebugFlag = (0 == strcmp("uw_frame_state_for", this->GetFuncName()));
	if (DebugFlag) SMP_msg("Entered RPONumberBlocks\n");
#endif
	int CurrNum = 0;
	list<SMPBasicBlock *> WorkList;
	this->ResetProcessedBlocks();
	// Renumbering (vs. first numbering) skips the indirect-reachability fixup below.
	bool Renumbering = (!(this->RPOBlocks.empty()));
	this->RPOBlocks.clear();  // we can re-do the RPO numbering as we remove unreachable blocks.

	// Number the first block with 0.
	list<SMPBasicBlock *>::iterator BlockIter = this->Blocks.begin();
	if ((BlockIter == this->Blocks.end()) || (0 == this->BlockCount)) {
		// Empty function. Happens when we remove basic blocks with "call 0" instructions.
		//  This means the entire function is only reachable when the code is compiled
		//  with some debug option, which was not present.
		this->BlockCount = 0;
		return;
	}

	SMPBasicBlock *CurrBlock = (*BlockIter);
#if 0
	if (this->RPOBlocks.capacity() <= (std::size_t) this->BlockCount) {
		SMP_msg("Reserving %d RPOBlocks old value: %d\n", 2+this->BlockCount, this->RPOBlocks.capacity());
		this->RPOBlocks.reserve(2 + this->BlockCount);
		this->RPOBlocks.assign(2 + this->BlockCount, this->Blocks.end());
	}
#endif
	CurrBlock->SetNumber(CurrNum);
	CurrBlock->SetProcessed(true);
	this->RPOBlocks.push_back(CurrBlock);
	++CurrNum;
	// Push the first block's successors onto the work list.
	list<SMPBasicBlock *>::iterator CurrSucc = CurrBlock->GetFirstSucc();
	while (CurrSucc != CurrBlock->GetLastSucc()) {
		WorkList.push_back(*CurrSucc);
		++CurrSucc;
	}

	// Use the WorkList to iterate through all blocks in the function
	list<SMPBasicBlock *>::iterator CurrListItem = WorkList.begin();
	bool change;
	while (!WorkList.empty()) {
		change = false;
		while (CurrListItem != WorkList.end()) {
			CurrBlock = (*CurrListItem);
			if (CurrBlock->IsProcessed()) {
				// Duplicates get pushed onto the WorkList because a block
				//  can be the successor of multiple other blocks. If it is
				//  already numbered, it is a duplicate and can be removed
				//  from the list.
				CurrListItem = WorkList.erase(CurrListItem);
				change = true;
				continue;
			}
			if (CurrBlock->AllPredecessorsProcessed()) {
				// Ready to be numbered.
				CurrBlock->SetNumber(CurrNum);
				CurrBlock->SetProcessed(true);
#if 0
				SMP_msg("Set RPO number %d\n", CurrNum);
				if (DebugFlag && (7 == CurrNum))
					this->Dump();
#endif
				this->RPOBlocks.push_back(CurrBlock);
				++CurrNum;
				change = true;
				// Push its unnumbered successors onto the work list.
				CurrSucc = CurrBlock->GetFirstSucc();
				while (CurrSucc != CurrBlock->GetLastSucc()) {
					if (!(*CurrSucc)->IsProcessed())
						WorkList.push_back(*CurrSucc);
					++CurrSucc;
				}
				CurrListItem = WorkList.erase(CurrListItem);
			}
			else {
				++CurrListItem;
			}
		} // end while (CurrListItem != WorkList.end())
		if (change) {
			// Reset CurrListItem to beginning of work list for next iteration.
			CurrListItem = WorkList.begin();
		}
		else {
			// Loops can cause us to not be able to find a WorkList item that has
			//  all predecessors numbered. Take the WorkList item with the lowest address
			//  and number it so we can proceed.
			CurrListItem = WorkList.begin();
			STARS_ea_t LowAddr = (*CurrListItem)->GetFirstAddr();
			list<SMPBasicBlock *>::iterator SaveItem = CurrListItem;
			++CurrListItem;
			while (CurrListItem != WorkList.end()) {
				if (LowAddr > (*CurrListItem)->GetFirstAddr()) {
					SaveItem = CurrListItem;
					LowAddr = (*CurrListItem)->GetFirstAddr();
				}
				++CurrListItem;
			}
			// SaveItem should now be numbered.
			(*SaveItem)->SetNumber(CurrNum);
			(*SaveItem)->SetProcessed(true);
#if SMP_DEBUG_DATAFLOW
			// FIX: STARS_ea_t can be wider than unsigned int; use %llx with an
			//  explicit cast as done in the other messages in this file.
			SMP_msg("Picked LowAddr %llx and set RPO number %d\n", (unsigned long long) LowAddr, CurrNum);
#endif
			this->RPOBlocks.push_back(*SaveItem);
			++CurrNum;
			// Push its unnumbered successors onto the work list.
			CurrSucc = (*SaveItem)->GetFirstSucc();
			while (CurrSucc != (*SaveItem)->GetLastSucc()) {
				if (!(*CurrSucc)->IsProcessed())
					WorkList.push_back(*CurrSucc);
				++CurrSucc;
			}
			CurrListItem = WorkList.erase(SaveItem);
			CurrListItem = WorkList.begin();
		} // end if (change) ... else ...
	} // end while work list is nonempty

	// Prior to construction of hell nodes for functions with indirect jumps, there
	//  could still be unnumbered blocks because they appear to be unreachable
	//  (no predecessors from SetLinks() because they are reached only via indirect
	//  jumps). We need to number these and push them on the RPOBlocks vector so
	//  that the vector contains all the blocks.
	// NOTE: Odd new gcc recursion optimization seems to use indirect calls to reach
	//  some blocks within a recursive function, operating somewhat like an indirect
	//  jump.
	if (!Renumbering && (CurrNum < this->BlockCount)) { // already did this during the first numbering
		if (this->HasIndirectJumps() || this->HasIndirectCalls()) {
			for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
				CurrBlock = (*BlockIter);
				if (SMP_BLOCKNUM_UNINIT == CurrBlock->GetNumber()) {
					SMP_msg("WARNING: Numbering indirectly reachable block at %llx\n", (unsigned long long) CurrBlock->GetFirstAddr());
					CurrBlock->SetNumber(CurrNum);
					CurrBlock->SetProcessed(true);
					this->RPOBlocks.push_back(CurrBlock);
					++CurrNum;
				}
			}
		}
	}
	// If we still have unnumbered blocks, it is not because of indirect jumps or calls.
	//  We have some mysterious dead code.
	if (this->BlockCount > ((int) this->RPOBlocks.size())) {
		SMP_msg("SERIOUS WARNING: RPONumberBlocks method: Function %s has BlockCount %d and RPOBlocks size %zu\n",
			this->GetFuncName(), this->BlockCount, this->RPOBlocks.size());
		for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
			CurrBlock = (*BlockIter);
			if (!(CurrBlock->IsProcessed())) {
				SMP_msg("WARNING: Numbering apparently unreachable block at %llx\n", (unsigned long long) CurrBlock->GetFirstAddr());
				CurrBlock->SetNumber(CurrNum);
				CurrBlock->SetProcessed(true);
				this->RPOBlocks.push_back(CurrBlock);
				++CurrNum;
			}
		}
	}

	// Track the largest block count seen across all functions analyzed so far.
	if (((long long) this->BlockCount) > ((long long) STARS_MaxBlockCount)) {
		assert(0 < this->BlockCount);
		STARS_MaxBlockCount = (unsigned long) this->BlockCount;
		assert(this->BlockCount <= STARS_BLOCKNUM_MASK);
	}
	return;
} // end of SMPFunction::RPONumberBlocks()

// return block # of block that falls through to CurrBlockNum; SMP_BLOCKNUM_UNINIT if none
int SMPFunction::GetFallThroughPredBlockNum(int CurrBlockNum) {
	assert((0 <= CurrBlockNum) && (CurrBlockNum < (int)this->RPOBlocks.size()));
	int FTPredBlockNum = SMP_BLOCKNUM_UNINIT;
	SMPBasicBlock *CurrBlock = this->RPOBlocks[(size_t) CurrBlockNum];
	for (list<SMPBasicBlock *>::iterator PredIter = CurrBlock->GetFirstPred(); PredIter != CurrBlock->GetLastPred(); ++PredIter) {
		list<SMPBasicBlock *>::const_iterator SuccIter = (*PredIter)->GetFallThroughSucc();
		if (SuccIter != (*PredIter)->GetLastSucc()) { // has a fall-through successor
			if ((*SuccIter)->GetNumber() == CurrBlockNum) { // found it
				FTPredBlockNum = (*PredIter)->GetNumber();
				break;
			}
		}
	}
	return FTPredBlockNum;
} // end of SMPFunction::GetFallThroughPredBlockNum()

void SMPFunction::SetMarkerInstDefs(void) {
	// Create DEFs in the marker instruction for all names in the LiveInSet
	//  of the first block. These are the names for the function that
	//  would otherwise look like USEs of uninitialized variables later.
	// Note that the LiveVariableAnalysis work does not actually populate
	//  a LiveInSet for the first block, so we simulate it with its
	//  dataflow equation, UpExposed union (LiveOut minus VarKill).
	STARSOpndSetIter UpExposedIter, LiveOutIter;
	list<SMPInstr *>::iterator MarkerInst = this->Instrs.begin();
	SMPBasicBlock *FirstBlock = this->Blocks.front();
	for (UpExposedIter = FirstBlock->GetFirstUpExposed();
		UpExposedIter != FirstBlock->GetLastUpExposed();
		++UpExposedIter) {
		// Add DEF with SSANum of 0.
		(*MarkerInst)->AddDef(*UpExposedIter, UNINIT, 0);
		// Add to the VarKill and LiveIn sets.
		FirstBlock->AddVarKill(*UpExposedIter); // "killed" by marker inst def
		FirstBlock->AddLiveIn(*UpExposedIter);
	}
	for (LiveOutIter = FirstBlock->GetFirstLiveOut();
		LiveOutIter != FirstBlock->GetLastLiveOut();
		++LiveOutIter) {
		if (!(FirstBlock->IsVarKill(*LiveOutIter))) {
			// Add DEF with SSANum of 0.
			(*MarkerInst)->AddDef(*LiveOutIter, UNINIT, 0);
			// Add to the VarKill and LiveIn sets.
			FirstBlock->AddVarKill(*LiveOutIter); // "killed" by marker inst def
			FirstBlock->AddLiveIn(*LiveOutIter);
		}
	}
}

// Perform live variable analysis on all blocks in the function.
// See chapter 9 of Cooper/Torczon, Engineering a Compiler, 1st edition, for the algorithm.
// When Recomputing, first clears phi functions, rebuilds each block's VarKill and
//  UpwardExposed sets, and resets loop-header/tail bits. Then iterates the LiveOut
//  dataflow equations to a fixed point, and finally creates marker-instruction DEFs
//  for names live into the entry block (SetMarkerInstDefs).
void SMPFunction::LiveVariableAnalysis(bool Recomputing) {
	list<SMPBasicBlock *>::iterator BlockIter;
	SMPBasicBlock *CurrBlock;
#if SMP_DEBUG_DATAFLOW
	bool DebugFlag = (0 == strcmp("uw_frame_state_for", this->GetFuncName()));
#endif
#if SMP_DEBUG_DATAFLOW_VERBOSE
	SMP_msg("LiveVariableAnalysis for %s\n", this->GetFuncName());
#endif

	if (Recomputing) {
		for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
			CurrBlock = (*BlockIter);

			// Clear out old Phi functions before they are recomputed later.
			CurrBlock->ClearPhiFunctions();

			// Initialize the Killed and UpwardExposed sets for each block.
			CurrBlock->InitKilledExposed(this->UsesFramePointer(), false);

			// Clear the loop bits for each block.
			CurrBlock->ResetLoopHeaderBlock();
			CurrBlock->ResetLoopTailBlock();
		}
	}

	bool changed;
	// Iterate over each block, updating LiveOut sets until no more changes are made.
	// NOTE: LVA is more efficient when computed over a reverse post-order list of blocks
	//  from the inverted CFG. We have an RPO list from the forward CFG, so it is just as
	//  good to simply iterate through the blocks in layout order.
#if 1
	do {
		changed = false;
		for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
			CurrBlock = (*BlockIter);
			changed |= CurrBlock->UpdateLiveOut(false);
		}
	} while (changed);
#else // Use reverse postorder
	do {
		changed = false;
		for (std::size_t index = 0; index < this->RPOBlocks.size(); ++index) {
			CurrBlock = this->RPOBlocks.at(index);
			changed |= CurrBlock->UpdateLiveOut(false);
		}
	} while (changed);
#endif

	this->SetMarkerInstDefs();

	// FIX: DebugFlag is declared only under SMP_DEBUG_DATAFLOW, so this use must
	//  require both macros. Previously, a build with SMP_DEBUG_DATAFLOW_VERBOSE
	//  set but SMP_DEBUG_DATAFLOW unset would fail to compile here.
#if SMP_DEBUG_DATAFLOW && SMP_DEBUG_DATAFLOW_VERBOSE
	if (DebugFlag) SMP_msg("Exiting LiveVariableAnalysis\n");
#endif
	return;
} // end of SMPFunction::LiveVariableAnalysis()

// After normalization, re-do stack ops in LVA sets.
void SMPFunction::RecomputeStackLVA(void) {
	// Step 1: Erase old stack ops in all LVA sets.
	bool StackOpsRemoved = false;
	bool UseFP = this->UsesFramePointer();

	list<SMPBasicBlock *>::iterator BlockIter;
	for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
		StackOpsRemoved = ((*BlockIter)->RemoveStackOpsFromLVASets() || StackOpsRemoved);
	}

	if (!StackOpsRemoved)
		return;

	// Step 2: Re-analyze LVA VarKill, UpExposed, and LiveOut stack ops.
	for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
		(*BlockIter)->InitKilledExposed(UseFP, true);
	}
	bool changed = true;
	while (changed) {
		bool ChangedOnThisIter = false;
		for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
			ChangedOnThisIter = ((*BlockIter)->UpdateLiveOut(true) || ChangedOnThisIter);
		}
		changed = ChangedOnThisIter;
	}

	// Step 3: Redo the stack ops in the SSA marker rung.
	SMPBasicBlock *FirstBlock = this->GetBlockByNum(0);
	vector<SMPInstr *>::iterator MarkerIter = FirstBlock->GetFirstInst();
	SMPInstr *MarkerInst = (*MarkerIter);
	assert(MarkerInst->IsMarkerInst());
	MarkerInst->ClearDefs();
	this->SetMarkerInstDefs();

	// Step 4: Redo the LiveIn sets.
	for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
		(*BlockIter)->ComputeLiveInSet(true);
	}

	return;
} // SMPFunction::RecomputeStackLVA()


// Return the IDom index that is the end of the intersection prefix of the Dom sets of
//  the two blocks designated by the RPO numbers passed in.
// See Cooper & Torczon, "Engineering a Compiler" 1st edition figure 9.8.
int SMPFunction::IntersectDoms(int block1, int block2) const {
	int LeftFinger = block1;
	int RightFinger = block2;
	// Walk whichever finger has the larger RPO number up its dominator chain,
	//  one step at a time, until the two fingers meet at the common dominator.
	while (LeftFinger != RightFinger) {
		if (LeftFinger > RightFinger) {
			LeftFinger = this->IDom.at(LeftFinger);
		}
		else {
			RightFinger = this->IDom.at(RightFinger);
		}
	}
	return LeftFinger;
} // end of SMPFunction::IntersectDoms()

// Compute immediate dominators of all blocks into IDom[] vector.
// Iterative fixed-point computation over blocks in RPO order (see the
//  IntersectDoms() helper and Cooper & Torczon fig. 9.8). On exit,
//  IDom[i] holds the RPO number of the immediate dominator of the block
//  whose RPO number is i; the entry block is its own IDom.
void SMPFunction::ComputeIDoms(void) {
	bool DebugFlag = false;
#if SMP_DEBUG_DATAFLOW
	DebugFlag = (0 == strcmp("_ZN6soplex7NameSetC2Eiidd", this->GetFuncName()));
	if (DebugFlag) SMP_msg("Entered ComputeIDoms\n");
#endif
	// Initialize the IDom[] vector to uninitialized values for all blocks.
	this->IDom.reserve(this->BlockCount);
	this->IDom.assign(this->BlockCount, SMP_BLOCKNUM_UNINIT);
	if (DebugFlag) {
		SMP_msg("BlockCount = %d RPOBlocks size = %zu\n", this->BlockCount, this->RPOBlocks.size());
	}
	// A size mismatch here means RPONumberBlocks() left blocks unnumbered;
	//  warn, because the loops below index RPOBlocks by RPO number.
	if (this->BlockCount != ((int) this->RPOBlocks.size())) {
		SMP_msg("SERIOUS WARNING: Function %s has BlockCount of %d and RPOBlocks size of %zu\n",
			this->GetFuncName(), this->BlockCount, this->RPOBlocks.size());
	}
	this->IDom[0] = 0; // First block is dominated only by itself
	bool changed;
	do {
		changed = false;
		// Process all blocks except the entry block, in RPO order.
		for (std::size_t RPONum = 1; RPONum < (std::size_t) this->BlockCount; ++RPONum) {
			if (DebugFlag) SMP_msg("RPONum %zu\n", RPONum);
#if 0
			if (DebugFlag) {
				SMP_msg("RPOBlocks vector size: %d\n", this->RPOBlocks.size());
				for (std::size_t index = 0; index < this->RPOBlocks.size(); ++index) {
					SMP_msg("RPOBlocks entry %d is %d\n", index, RPOBlocks[index]->GetNumber());
				}
			}
#endif
			// To avoid infinite loops on blocks that dominate themselves but otherwise have no 
			//  predecessors (probably reachable only through indirect jumps), we stop processing
			//  the blocks once the IDom becomes the top (entry) block. This probably saves time
			//  on other blocks as well.
			if (0 == this->IDom[RPONum])
				continue;

			SMPBasicBlock *CurrBlock = this->RPOBlocks.at(RPONum);
			// if (DebugFlag) SMP_msg("CurrBlock: %x\n", CurrBlock._Ptr);
			list<SMPBasicBlock *>::iterator CurrPred;
			// Initialize NewIdom to the first processed predecessor of block RPONum.
			//  "Processed" here means the predecessor already has an IDom entry.
			int NewIdom = SMP_BLOCKNUM_UNINIT;
			for (CurrPred = CurrBlock->GetFirstPred(); CurrPred != CurrBlock->GetLastPred(); ++CurrPred) {
				int PredNum = (*CurrPred)->GetNumber();
				if (DebugFlag) SMP_msg("Pred: %d\n", PredNum);
				// **!!** See comment below about unreachable blocks.
				if (SMP_BLOCKNUM_UNINIT == PredNum)
					continue;
				int PredIDOM = this->IDom.at(PredNum);
				if (DebugFlag) SMP_msg("Pred IDom: %d\n", PredIDOM);
				if (SMP_BLOCKNUM_UNINIT != PredIDOM) {
					NewIdom = PredNum;
					break;
				}
			}
			// No processed predecessor found: fall back to the entry block as IDom
			//  so the algorithm can make progress; log which case we assume.
			if (NewIdom == SMP_BLOCKNUM_UNINIT) {
				SMP_msg("WARNING: Failure on NewIdom in ComputeIDoms for %s\n", this->GetFuncName());
				if (this->HasIndirectJumps() || this->HasIndirectCalls()) {
					// Might be reachable only through indirect jumps.
					NewIdom = 0; // make it dominated by entry block
					SMP_msg("WARNING: Assuming block %d at address %llx is reachable indirectly.\n",
						CurrBlock->GetNumber(), (unsigned long long) CurrBlock->GetFirstAddr());
				}
				else {
					// Might be exception handling code, reachable only by call stack walking.
					NewIdom = 0; // make it be dominated by entry block
					SMP_msg("WARNING: Assuming block %d at address %llx is reachable by exception handling.\n",
						CurrBlock->GetNumber(), (unsigned long long) CurrBlock->GetFirstAddr());
				}
			}
			assert(NewIdom != SMP_BLOCKNUM_UNINIT);
			// Loop through all predecessors of block RPONum except block NewIdom.
			//  Set NewIdom to the intersection of its Dom set and the Doms set of
			//  each predecessor that has had its Doms set computed.
			for (CurrPred = CurrBlock->GetFirstPred(); CurrPred != CurrBlock->GetLastPred(); ++CurrPred) {
				int PredNum = (*CurrPred)->GetNumber();
				if (DebugFlag) SMP_msg("PredNum: %d\n", PredNum);
				// **!!** We can avoid failure on unreachable basic blocks
				//  by executing a continue statement if PredNum is -1. Long term solution
				//  is to prune out unreachable basic blocks, or better yet, create hell nodes
				//  if the function has indirect jumps.
				if (PredNum == SMP_BLOCKNUM_UNINIT)
					continue;
				int PredIDOM = this->IDom.at(PredNum);
				if (DebugFlag) SMP_msg("PredIDOM: %d\n", PredIDOM);
				if ((SMP_BLOCKNUM_UNINIT == PredIDOM) || (NewIdom == PredIDOM)) {
					// Skip predecessors that have uncomputed Dom sets, or are the
					//  current NewIdom.
					continue;
				}
				if (DebugFlag) SMP_msg("Old NewIdom value: %d\n", NewIdom);
				NewIdom = this->IntersectDoms(PredNum, NewIdom);
				if (DebugFlag) SMP_msg("New NewIdom value: %d\n", NewIdom);
			}
			// If NewIdom is not the value currently in vector IDom[], update the
			//  vector entry and set changed to true.
			if (NewIdom != this->IDom.at(RPONum)) {
				if (DebugFlag) SMP_msg("IDOM changed from %d to %d\n", this->IDom.at(RPONum), NewIdom);
				this->IDom[RPONum] = NewIdom;
				changed = true;
			}
		}
	} while (changed); // repeat until the IDom[] vector stabilizes
	return;
} // end of SMPFunction::ComputeIDoms()

// Compute dominance frontier sets for each block.
void SMPFunction::ComputeDomFrontiers(void) {
	list<SMPBasicBlock *>::iterator BlockIter;
	SMPBasicBlock *RunnerBlock;
	SMPBasicBlock *CurrBlock;
	for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
		CurrBlock = (*BlockIter);
		// We look only at join points in the CFG, as per Cooper/Torczon chapter 9.
		if (1 < CurrBlock->GetNumPreds()) { // join point; more than 1 predecessor
			int runner;
			list<SMPBasicBlock *>::iterator PredIter;
			SMPBasicBlock *CurrPred;
			for (PredIter = CurrBlock->GetFirstPred(); PredIter != CurrBlock->GetLastPred(); ++PredIter) {
				CurrPred = (*PredIter);
				// For each predecessor, we run up the IDom[] vector and add CurrBlock to the
				//  DomFrontier for all blocks that are between CurrPred and IDom[CurrBlock],
				//  not including IDom[CurrBlock] itself.
				runner = CurrPred->GetNumber();
				while (runner != this->IDom.at(CurrBlock->GetNumber())) {
					// Cooper/Harvey/Kennedy paper does not quite agree with the later
					//  text by Cooper/Torczon. Text (1st ed.) says that the start node has no IDom
					//  in the example on pages 462-463, but it shows an IDOM for the
					//  root node in Figure 9.9 of value == itself. The first edition text
					//  on p.463 seems correct, as the start node dominates every node and
					//  thus should have no dominance frontier.
					if (SMP_TOP_BLOCK == runner)
						break;
					RunnerBlock = this->RPOBlocks.at(runner);
					RunnerBlock->AddToDomFrontier(CurrBlock->GetNumber());
					runner = this->IDom.at(runner);
				}
			} // end for all predecessors
		} // end if join point
	} // end for all blocks
	return;
} // end of SMPFunction::ComputeDomFrontiers()

// Compute the GlobalNames set, which includes all operands that are used in more than
//  one basic block. It is the union of all UpExposedSets of all blocks.
// Side effect: each operand newly inserted into GlobalNames is stamped with a
//  monotonically increasing index (via SetGlobalIndex) that later code uses as
//  a subscript into BlocksDefinedIn[] and related per-name vectors.
void SMPFunction::ComputeGlobalNames(void) {
	STARSOpndSetIter SetIter;
	list<SMPBasicBlock *>::iterator BlockIter;
	SMPBasicBlock *CurrBlock;
	unsigned int index = 0; // next global name index to assign
	this->GlobalNames.clear();
	if (this->Blocks.size() < 2)
		return; // cannot have global names if there is only one block

#if SMP_DEBUG_DATAFLOW
	bool DebugFlag = false;
	DebugFlag = (0 == strcmp("uw_frame_state_for", this->GetFuncName()));
#endif

	// Union the upward-exposed operands of every block into GlobalNames.
	for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
		CurrBlock = (*BlockIter);
		for (SetIter = CurrBlock->GetFirstUpExposed(); SetIter != CurrBlock->GetLastUpExposed(); ++SetIter) {
			STARSOpndTypePtr TempOp = *SetIter;
			// The GlobalNames set will have the complete collection of operands that we are
			//  going to number in our SSA computations. We now assign an operand number
			//  within the STARSOpndType structure for each, so that we can index into the
			//  BlocksUsedIn[] vector, for example. This operand number is not to be
			//  confused with SSA numbers.
			// We use the operand number field op_t.n for the lower 8 bits, and the offset
			//  fields op_t.offb:op_t.offo for the upper 16 bits. We are overwriting IDA
			//  values here, but operands in the data flow analysis sets should never be
			//  inserted back into the IDA database anyway.
			SetGlobalIndex(TempOp, (std::size_t) index);

#if SMP_DEBUG_DATAFLOW
			if (DebugFlag) {
				SMP_msg("Global Name: ");
				PrintListOperand(TempOp);
			}
#endif
			pair<STARSOpndSetIter, bool> InsertResult;
			InsertResult = this->GlobalNames.insert(TempOp);
			if (!InsertResult.second) {
				// Already in GlobalNames, so don't assign an index number.
				// NOTE(review): SetGlobalIndex() above has already stamped this
				//  duplicate operand with the current running index, which can
				//  differ from the index on the copy stored in GlobalNames.
				//  Downstream code (e.g. ComputeBlocksDefinedIn) re-fetches the
				//  index via GlobalNames.find(), so this looks harmless, but
				//  confirm before relying on the index field of operands that
				//  are not the GlobalNames copies.
				;
#if SMP_DEBUG_DATAFLOW
				if (DebugFlag) {
					SMP_msg(" already in GlobalNames.\n");
				}
#endif
			}
			else {
				// New name inserted; advance the index for the next new name.
				++index;
#if SMP_DEBUG_DATAFLOW
				if (DebugFlag) {
					SMP_msg(" inserted as index %d\n", ExtractGlobalIndex(TempOp));
				}
#endif
			}
		} // for each upward exposed item in the current block
	} // for each basic block

	SMP_msg("INFO: GlobalNames size is %zu for func at %llx\n", this->GlobalNames.size(),
		(unsigned long long) this->GetFirstFuncAddr());
	assert(16777215 >= this->GlobalNames.size()); // index fits in 24 bits
	return;
} // end of SMPFunction::ComputeGlobalNames()

// For each item in GlobalNames, record the blocks that DEF the item.
// Populates BlocksDefinedIn[i] (indexed by global name index i) with the list of
//  block numbers that kill name i, and stamps the global index onto each VarKill
//  and LiveOut operand so later passes can recover it without a set lookup.
void SMPFunction::ComputeBlocksDefinedIn(void) {
	// Loop through all basic blocks and examine all DEFs. For Global DEFs, record
	//  the block number in BlocksDefinedIn. The VarKill set records DEFs without
	//  having to examine every instruction.
	list<SMPBasicBlock *>::iterator BlockIter;
	SMPBasicBlock *CurrBlock;

	// One (initially empty) block-number list per global name.
	this->BlocksDefinedIn.clear();
	for (std::size_t i = 0; i < this->GlobalNames.size(); ++i) {
		list<int> TempList;
		this->BlocksDefinedIn.push_back(TempList);
	}
#if SMP_DEBUG_DATAFLOW_VERBOSE
	SMP_msg("Number of GlobalNames: %d\n", this->GlobalNames.size());
	SMP_msg("Size of BlocksDefinedIn: %d\n", this->BlocksDefinedIn.size());
#endif
	for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
		STARSOpndSetIter KillIter;
		CurrBlock = (*BlockIter);
		for (KillIter = CurrBlock->GetFirstVarKill(); KillIter != CurrBlock->GetLastVarKill(); ++KillIter) {
			// If killed item is not a block-local item (it is global), record it.
			STARSOpndTypePtr KillOp = (*KillIter);
			STARSOpndSetIter NameIter = this->GlobalNames.find(KillOp);
			if (NameIter != this->GlobalNames.end()) { // found in GlobalNames set
				// We have a kill of a global name. Get index from three 8-bit fields.
				//  Note: the index is read from the GlobalNames copy (*NameIter),
				//  which is the authoritative holder of the index assigned in
				//  ComputeGlobalNames().
				unsigned int index = ExtractGlobalIndex(*NameIter);
				if (index >= this->GlobalNames.size()) {
					// We are about to assert false.
					SMP_msg("ComputeBlocksDefinedIn: Bad index: %u limit: %zu\n", index,
						this->GlobalNames.size());
					SMP_msg("Block number %d\n", CurrBlock->GetNumber());
					SMP_msg("Killed item: ");
					PrintListOperand(*KillIter);
					SMP_msg("\n");
					SMP_msg("This is a fatal error.\n");
				}
				assert(index < this->GlobalNames.size());
				// index is a valid subscript for the BlocksDefinedIn vector. Push the
				//  current block number onto the list of blocks that define this global name.
				this->BlocksDefinedIn[index].push_back(CurrBlock->GetNumber());
				(*KillIter)->SetOpGlobalIndex(index);
			}			
		} // end for all VarKill operands
		// Set the GlobalNameIndex into each LiveOutSet item.
		//  Every LiveOut operand is expected to be a global name, hence the assert.
		STARSOpndSetIter LiveOutIter;
		for (LiveOutIter = CurrBlock->GetFirstLiveOut(); LiveOutIter != CurrBlock->GetLastLiveOut(); ++LiveOutIter) {
			STARSOpndTypePtr LiveOutOp = (*LiveOutIter);
			STARSOpndSetIter NameIter = this->GlobalNames.find(LiveOutOp);
			assert(NameIter != this->GlobalNames.end()); // found in GlobalNames set
			unsigned int index = ExtractGlobalIndex(*NameIter);
			(*LiveOutIter)->SetOpGlobalIndex(index);
		}
	} // end for all blocks
	return;
} // end of SMPFunction::ComputeBlocksDefinedIn()

// Compute the phi functions at the entry point of each basic block that is a join point.
// Iterated-dominance-frontier worklist: for each global name, seed the worklist with
//  the blocks that define the name, then insert a phi in every dominance-frontier
//  block where the name is LiveIn (the LiveIn check prunes dead phis, making this a
//  fully pruned SSA), re-queueing each block that received a new phi.
void SMPFunction::InsertPhiFunctions(void) {
	STARSOpndSetIter NameIter;
	list<int> WorkList;  // list of block numbers
	bool DebugFlag = false;
#if SMP_DEBUG_DATAFLOW
	DebugFlag = (0 == strcmp("uw_frame_state_for", this->GetFuncName()));
#endif
	if (DebugFlag) SMP_msg("GlobalNames size: %zu\n", this->GlobalNames.size());
	// Process each global name independently.
	for (NameIter = this->GlobalNames.begin(); NameIter != this->GlobalNames.end(); ++NameIter) {
		int CurrNameIndex = (int) (ExtractGlobalIndex(*NameIter));
		if (DebugFlag) SMP_msg("CurrNameIndex: %d\n", CurrNameIndex);
#if 0
		DebugFlag = (DebugFlag && (6 == CurrNameIndex));
#endif
		// Initialize the work list to all blocks that define the current name.
		WorkList.clear();
		list<int>::iterator WorkIter;
		for (WorkIter = this->BlocksDefinedIn.at((std::size_t) CurrNameIndex).begin();
			WorkIter != this->BlocksDefinedIn.at((std::size_t) CurrNameIndex).end();
			++WorkIter) {
			WorkList.push_back(*WorkIter);
		}

		// Iterate through the work list, inserting phi functions for the current name
		//  into all the blocks in the dominance frontier of each work list block.
		//  Insert into the work list each block that had a phi function added.
		while (!WorkList.empty()) {
#if SMP_DEBUG_DATAFLOW_VERBOSE
			if (DebugFlag) SMP_msg("WorkList size: %d\n", WorkList.size());
#endif
			list<int>::iterator WorkIter = WorkList.begin();
			while (WorkIter != WorkList.end()) {
				set<int>::iterator DomFrontIter;
#if SMP_DEBUG_DATAFLOW_VERBOSE
				if (DebugFlag) SMP_msg("WorkIter: %d\n", *WorkIter);
#endif
				if (DebugFlag && (*WorkIter > this->BlockCount)) {
					SMP_msg("ERROR: WorkList block # %d out of range.\n", *WorkIter);
				}
				SMPBasicBlock *WorkBlock = this->RPOBlocks[*WorkIter];
				// Visit every block in the dominance frontier of the worklist block.
				for (DomFrontIter = WorkBlock->GetFirstDomFrontier();
					DomFrontIter != WorkBlock->GetLastDomFrontier();
					++DomFrontIter) {
#if SMP_DEBUG_DATAFLOW_VERBOSE
					if (DebugFlag) SMP_msg("DomFront: %d\n", *DomFrontIter);
#endif
					if (DebugFlag && (*DomFrontIter > this->BlockCount)) {
						SMP_msg("ERROR: DomFront block # %d out of range.\n", *DomFrontIter);
					}
					SMPBasicBlock *PhiBlock = this->RPOBlocks[*DomFrontIter];
					// Before inserting a phi function for the current name in *PhiBlock,
					//  see if the current name is LiveIn for *PhiBlock. If not, there
					//  is no need for the phi function. This check is what makes the SSA
					//  a fully pruned SSA.
					if (PhiBlock->IsLiveIn(*NameIter)) {
						// Build a phi with one input per predecessor edge.
						std::size_t NumPreds = PhiBlock->GetNumPreds();
						DefOrUse CurrRef(*NameIter);
						SMPPhiFunction CurrPhi(CurrNameIndex, CurrRef);
						for (std::size_t NumCopies = 0; NumCopies < NumPreds; ++NumCopies) {
							CurrPhi.PushBack(CurrRef); // inputs to phi
						}
						if (PhiBlock->AddPhi(CurrPhi)) {
							// If not already in Phi set, new phi function was inserted.
							//  Re-queue the phi block: the phi is itself a new DEF of the name.
							WorkList.push_back(PhiBlock->GetNumber());
#if SMP_DEBUG_DATAFLOW_VERBOSE
							if (DebugFlag) SMP_msg("Added phi for name %d at top of block %d\n", CurrNameIndex, PhiBlock->GetNumber());
#endif
						}
					}
					else {
						if (DebugFlag) {
							SMP_msg("Global %d not LiveIn for block %d\n", CurrNameIndex, PhiBlock->GetNumber());
						}
					}
				} // end for all blocks in the dominance frontier
				// Remove current block number from the work list
				if (DebugFlag) {
					SMP_msg("Removing block %d from work list.\n", *WorkIter);
				}
				WorkIter = WorkList.erase(WorkIter);
			} // end for all block numbers in the work list
		} // end while the work list is not empty
		if (DebugFlag) SMP_msg("WorkList empty.\n");
	} // end for all elements of the GlobalNames set
	return;
} // end of SMPFunction::InsertPhiFunctions()

// Build the dominator tree.
void SMPFunction::BuildDominatorTree(void) {
	std::size_t index;
	// First, fill the DomTree vector with the parent numbers filled in and the child lists
	//  left empty.
	for (index = 0; index < this->IDom.size(); ++index) {
		pair<int, list<int> > DomTreeEntry;
		DomTreeEntry.first = this->IDom.at(index);
		DomTreeEntry.second.clear();
		this->DomTree.push_back(DomTreeEntry);
	}
	// Now, push the children onto the appropriate lists.
	for (index = 0; index < this->IDom.size(); ++index) {
		// E.g. if block 5 has block 3 as a parent, then we fetch the number 3
		//  using the expression this->DomTree.at(index).first, which was just
		//  initialized in the previous loop. Then we go to DomTree entry 3 and push
		//  the number 5 on its child list.
		int parent = this->DomTree.at(index).first;
		if (parent != (int) index) // block can dominate itself, but not in DomTree!
			this->DomTree.at(parent).second.push_back((int) index);
	}

	return;
} // end of SMPFunction::BuildDominatorTree()

// Does basic block HeadBlockNum dominate basic block TailBlockNum?
bool SMPFunction::DoesBlockDominateBlock(int HeadBlockNum, int TailBlockNum) const {
	if (HeadBlockNum == TailBlockNum)
		return true;
	else if ((HeadBlockNum == SMP_BLOCKNUM_UNINIT) || (TailBlockNum == SMP_BLOCKNUM_UNINIT)) {
		return false;
	}
	else {
		// Recurse downward from HeadBlockNum in the dominator tree until we find TailBlockNum
		//  or return false if we never find it.
		bool FoundIt = false;
		for (list<int>::const_iterator ChildIter = this->DomTree.at(HeadBlockNum).second.begin(); 
			ChildIter != this->DomTree.at(HeadBlockNum).second.end(); 
			++ChildIter) {
			int ChildBlockNum = (*ChildIter);
			if (this->DoesBlockDominateBlock(ChildBlockNum, TailBlockNum)) { // recurse depth-first
				FoundIt = true;
				break;
			}
		}
		return FoundIt;
	}
} // end of SMPFunction::DoesBlockDominateBlock()

#define STARS_DFS_UNVISITED -1
#define STARS_DFS_IN_PROGRESS 0
#define STARS_DFS_COMPLETED 1

// return true if CFG is a reducible graph.
bool SMPFunction::TestCFGReducibility(void) {
	// Reset DFS state: one UNVISITED marker per basic block.
	this->DFSMarkers.assign((std::size_t) this->BlockCount, STARS_DFS_UNVISITED);

	// Depth-first search from the entry block classifies every edge.
	bool Reducible = this->CFGReducibilityHelper(SMP_TOP_BLOCK);

	// DFS markers are only needed during the traversal; release them.
	this->DFSMarkers.clear();

	return Reducible;
} // end of SMPFunction::TestCFGReducibility()

// recursive depth-first-search helper for TestCFGReducibility()
bool SMPFunction::CFGReducibilityHelper(std::size_t BlockNumber) {
	bool Reducible = true;

	this->DFSMarkers.at(BlockNumber) = STARS_DFS_IN_PROGRESS;
	SMPBasicBlock *CurrBlock = this->RPOBlocks.at(BlockNumber);
	for (list<SMPBasicBlock *>::iterator SuccIter = CurrBlock->GetFirstSucc(); SuccIter != CurrBlock->GetLastSucc(); ++SuccIter) {
		SMPBasicBlock *SuccBlock = (*SuccIter);
		std::size_t SuccBlockNum = (std::size_t) SuccBlock->GetNumber();
		if (SuccBlockNum < this->DFSMarkers.size()) {
			int DFSStatus = this->DFSMarkers[SuccBlockNum];
			if (DFSStatus == STARS_DFS_UNVISITED) {
				Reducible = this->CFGReducibilityHelper(SuccBlockNum);
			}
			else if (DFSStatus == STARS_DFS_IN_PROGRESS) {
				// back edge; reducibility depends on dominator relationship
				if (this->DoesBlockDominateBlock((int) BlockNumber, (int) SuccBlockNum)) {
					// forward edge; no problem
					;
				}
				else if (this->DoesBlockDominateBlock((int) SuccBlockNum, (int) BlockNumber)) {
					// reducible back edge; no problem
					;
				}
				else { // neither block dominates the other; irreducible back edge
					Reducible = false;
					SMP_msg("SERIOUS WARNING: Function %s at %llx is irreducible because of back edge from block at %llx to %llx\n", 
						this->GetFuncName(), (unsigned long long) this->GetFirstFuncAddr(), 
						(unsigned long long) SuccBlock->GetFirstAddr(), (unsigned long long) CurrBlock->GetFirstAddr());
				}
			}
			else {
				assert(DFSStatus == STARS_DFS_COMPLETED);
				// forward edge or cross edge; no problem
			}
		}
		else {
			Reducible = false;
			SMP_msg("ERROR: Block %d out of range in CFGReducibilityHelper for parent block %u\n", SuccBlock->GetNumber(), BlockNumber);
		}
		if (!Reducible) {
			break;
		}
	} // end for all successors

	this->DFSMarkers.at(BlockNumber) = STARS_DFS_COMPLETED;

	return Reducible;
} // end of SMPFunction::CFGReducibilityHelper()

// Is the block with number BlockNum contained in at least one loop?
bool SMPFunction::IsBlockInAnyLoop(int BlockNum) const {
	// With no loops in the function, no block can be inside one.
	if (0 == this->LoopCount)
		return false;
	assert(((size_t) BlockNum) < this->GetNumBlocks());
	// The per-block membership bitset has one bit per loop number;
	//  any set bit means the block lies inside some loop.
	return this->FuncLoopsByBlock.at((std::size_t) BlockNum).IsAnyBitSet();
} // end of SMPFunction::IsBlockInAnyLoop()

// Is block (with block # BlockNum) inside loop # LoopNum?
bool SMPFunction::IsBlockInLoop(int BlockNum, std::size_t LoopNum) {
	// No loops at all means no membership.
	if (0 == this->LoopCount)
		return false;
	assert(((size_t) BlockNum) < this->GetNumBlocks());
	// Out-of-range loop numbers are simply not-a-member, not an error.
	if (LoopNum >= this->LoopCount)
		return false;
	// Bit LoopNum in the block's bitset records membership in that loop.
	return this->FuncLoopsByBlock.at((std::size_t) BlockNum).GetBit(LoopNum);
} // end of SMPFunction::IsBlockInLoop()

// Do blocks BlockNum1 and BlockNum2 belong to the same set of loops?
bool SMPFunction::AreBlocksInSameLoops(const int BlockNum1, const int BlockNum2) const {
	// Trivially true when the function has no loops at all.
	if (0 == this->LoopCount)
		return true;
	assert(((size_t) BlockNum1) < this->GetNumBlocks());
	assert(((size_t) BlockNum2) < this->GetNumBlocks());
	// Compare the extreme set-bit positions of the two membership bitsets.
	// NOTE(review): only the lowest and highest loop numbers are compared,
	//  not full bitset equality - presumably sufficient given how loop
	//  numbers nest in this analysis; confirm before relying on exactness.
	const auto &LoopBits1 = this->FuncLoopsByBlock[(std::size_t) BlockNum1];
	const auto &LoopBits2 = this->FuncLoopsByBlock[(std::size_t) BlockNum2];
	bool LowestMatches = (LoopBits1.FindLowestBitSet() == LoopBits2.FindLowestBitSet());
	bool HighestMatches = (LoopBits1.FindHighestBitSet() == LoopBits2.FindHighestBitSet());
	return (LowestMatches && HighestMatches);
} // end of SMPFunction::AreBlocksInSameLoops()

// build list of loop numbers that BlockNum is part of.
void SMPFunction::BuildLoopList(int BlockNum, list<std::size_t> &LoopList) {
	assert((BlockNum >= 0) && (BlockNum < this->BlockCount));
	// Append the number of every loop whose membership bit is set for this block.
	for (std::size_t LoopNum = 0; LoopNum < this->LoopCount; ++LoopNum) {
		if (this->FuncLoopsByBlock.at((std::size_t) BlockNum).GetBit(LoopNum)) {
			LoopList.push_back(LoopNum);
		}
	}
	return;
} // end of SMPFunction::BuildLoopList()

// Build list of Block numbers contained in LoopNum.
void SMPFunction::BuildLoopBlockList(const size_t LoopNum, list<std::size_t> &BlockList) {
	assert(LoopNum < this->LoopCount);
	// Hoist the loop-invariant bitset lookup out of the scan.
	const auto &LoopBlockBits = this->FuncBlocksByLoop.at(LoopNum);
	// Append every block number whose bit is set in the loop's block bitset.
	for (size_t BlockNum = 0; BlockNum < (size_t) this->BlockCount; ++BlockNum) {
		if (LoopBlockBits.GetBit(BlockNum)) {
			BlockList.push_back(BlockNum);
		}
	}
	return;
} // end of SMPFunction::BuildLoopBlockList()

// Analyze how many times each loop iterates
void SMPFunction::AnalyzeLoopIterations(void) {
	bool VerboseOutput = global_stars_interface->VerboseLoopsMode();
	bool DebugFlag = false;
	DebugFlag = (0x417710 == this->GetFirstFuncAddr());
	// DebugFlag = (0x402730 == this->GetFirstFuncAddr());
	VerboseOutput = (VerboseOutput || DebugFlag);
	FILE *InfoAnnotFile = global_STARS_program->GetInfoAnnotFile();
	if (0 < this->GetNumLoops()) {
		// Get ready to mark memory writes by loop.
		STARSBitSet TempPositiveBitSet;
		TempPositiveBitSet.AllocateBits(this->MaxStackAccessLimit);
		STARSBitSet TempNegativeBitSet;
		TempNegativeBitSet.AllocateBits(0 - this->MinStackAccessOffset);
		for (unsigned i = 0; i < this->LoopCount; ++i) {
			this->NegativeOffsetStackBytesWrittenByLoop.push_back(TempNegativeBitSet);
			this->PositiveOffsetStackBytesWrittenByLoop.push_back(TempPositiveBitSet);
		}
	}

	this->LoopIterationsInitExprs.resize(this->LoopCount, nullptr);
	this->LoopIterationsLimitExprs.resize(this->LoopCount, nullptr);
	this->LoopIterationsCountExprs.resize(this->LoopCount, nullptr);
	this->LoopMemWriteRangeExprs.resize(this->LoopCount);
	this->LoopMemWriteBoundsExprs.resize(this->LoopCount);
	this->LoopMemWriteBoundsExprsExpanded.resize(this->LoopCount);
	this->StringMemWriteRangeExprs.resize(this->LoopCount + 1);
	this->StringMemWriteRangeWidths.resize(this->LoopCount + 1);
	this->LoopRegHashSets.resize(this->LoopCount);
	this->LoopRegSourceExprPairs.resize(this->LoopCount);
	this->LoopIVUsedAsStringWriteCounter.resize(this->LoopCount);
	this->NonStackFrameLoopMemWrites.resize(this->LoopCount, false);
	this->LoopMemRangeInArgRegsBitmap.resize(this->LoopCount);
	this->OutputRegsByLoop.resize(this->LoopCount);
	this->CalleePreservedRegsByLoop.resize(this->LoopCount);
	std::bitset<1 + MD_LAST_REG_NO> TempBitset(0);
	this->MemRangeRegsBitmap = TempBitset;
	this->LoopAnalyzedBIVIters.resize(this->LoopCount);
	this->LoopWritesGlobalStaticMemory.resize(this->LoopCount, false);
	this->LoopReadsGlobalStaticMemory.resize(this->LoopCount, false);
	this->LoopHasCalleeMemWrites.resize(this->LoopCount, false);
	this->LoopUsesStackPtrRegs.resize(this->LoopCount, true); // default to the most common case
	this->LoopHasPreconditions.resize(this->LoopCount, true); // default to the most common case
	this->LoopExecutesWithLimitValue.resize(this->LoopCount, false);
	this->LoopAnalysisProblems.resize(this->LoopCount, false);
	this->CalleeMemExprProblems.resize(this->LoopCount + 1, false);
	this->SymbolicAnalysisProblems.resize(this->LoopCount + 1, false);
	this->LoopIncrementValue.resize(this->LoopCount, 0); // default to invalid case
	this->LoopMemAddrExprsFromCallees.resize(this->LoopCount);
	this->LoopMemAddrExprWidthsFromCallees.resize(this->LoopCount);
	this->LoopMemExprsExpandToStackOffsets.resize(this->LoopCount, false);
	this->LoopMemAddrExprWidthsFromCalleeLoops.resize(this->LoopCount + 1);
	this->LoopMemAddrExprsFromCalleeLoops.resize(this->LoopCount + 1);
	this->MemAddrExprsFromCallees.resize(this->LoopCount + 1);
	this->MemAddrExprWidthsFromCallees.resize(this->LoopCount + 1);
	this->NonStackFrameCalleeMemWrites.resize(this->LoopCount + 1);
	this->InArgsUsedInMemWrites.resize(this->LoopCount + 1);
	this->InArgsUsedInMemWriteByteWidths.resize(this->LoopCount + 1);
	this->StoppedOnIVMemRangeExprs.resize(this->LoopCount + 1);
	this->StoppedOnIVNonRangeExprs.resize(this->LoopCount + 1);
	this->StoppedOnIVMemRangeIterWidths.resize(this->LoopCount + 1);
	this->StoppedOnIVNonRangeIterWidths.resize(this->LoopCount + 1);
	this->RelationalLowerBoundExprs.resize(this->LoopCount + 1);
	this->RelationalUpperBoundExprs.resize(this->LoopCount + 1);
	this->RelationalMemWriteWidths.resize(this->LoopCount + 1);

	for (size_t LoopIndex = 0; LoopIndex < this->LoopCount; ++LoopIndex) {
		this->LoopMemRangeInArgRegsBitmap[LoopIndex] = TempBitset;
		this->LoopAnalyzedBIVIters[LoopIndex] = this->LoopInductionVars[LoopIndex].end();
		++STARS_LoopIterationExprFailures; // Decrement if we succeed

		// Find the condition that terminates the loop.
		struct LoopComparison CurrentLoopComparisonExpr;
		CurrentLoopComparisonExpr.CompareOperator = SMP_NULL_OPERATOR;
		int TestBlockNum = this->LoopTestBlocksByLoopNum[LoopIndex];
		int LoopType = this->LoopTypesByLoopNum[LoopIndex];
		bool LowerLimit = false, UpperLimit = false; // Loop terminates by becoming greater than UpperLimit, or less than LowerLimit?
		STARSDefUseIter ExitUse1, ExitUse2; // iterators for the 
		STARS_uval_t ExitConstUse2 = 0; // const value for exit comparison instead of ExitUse2
		bool CompareAgainstConst = false; // compare ExitUse1 against ExitConstUse2 if true, else ExitUse1 vs. ExitUse2
		if ((SMP_BLOCKNUM_UNINIT != TestBlockNum) && ((LoopType == STARS_TOP_TESTING_LOOP) || (LoopType == STARS_BOTTOM_TESTING_LOOP))) {
			SMPBasicBlock *TestBlock = this->GetBlockByNum(TestBlockNum);
			int FollowBlockNum = this->LoopFollowNodes[LoopIndex];
			list<SMPBasicBlock *>::const_iterator TestTargBlockIter = TestBlock->GetCondNonFallThroughSucc();
			if (TestTargBlockIter == TestBlock->GetLastConstSucc()) {
				SMP_msg("ERROR: LOOP: Could not find FallThroughSuccessor in test block %d loop %u in func at %llx\n",
					TestBlockNum, LoopIndex, (uint64_t) this->GetFirstFuncAddr());
				this->LoopComparisonExprs.push_back(CurrentLoopComparisonExpr);
				this->LoopAnalysisProblems[LoopIndex] = true;
				continue;
			}

			int TestTargBlockNum = (*TestTargBlockIter)->GetNumber();
			bool BranchExitsLoop = (TestTargBlockNum == FollowBlockNum);
			CurrentLoopComparisonExpr.ExitsLoop = BranchExitsLoop;
			SMPoperator BranchOperator = SMP_NULL_OPERATOR;
			STARS_ea_t DecrementAddr = STARS_BADADDR;
			SMPInstr *CompareOrTestInst = TestBlock->FindBranchCompareOrTest(BranchOperator, DecrementAddr);
			if (NULL != CompareOrTestInst) {
				CompareOrTestInst->GetCompareOrTestExpr(CurrentLoopComparisonExpr);
				CurrentLoopComparisonExpr.CompareOperator = BranchOperator;
				CurrentLoopComparisonExpr.CompareAddr = CompareOrTestInst->GetAddr();
				this->LoopComparisonExprs.push_back(CurrentLoopComparisonExpr);
			}
			else if (STARS_BADADDR != DecrementAddr) {
				// Found a decrement rather than a compare or test.
				SMPInstr *DecrementInst = this->GetInstFromAddr(DecrementAddr);
				assert(nullptr != DecrementInst);
				if ((BranchOperator == SMP_EQUAL) || (BranchOperator == SMP_NOT_EQUAL)) {
					CurrentLoopComparisonExpr.CompareOperator = BranchOperator;
					CurrentLoopComparisonExpr.CompareAddr = DecrementAddr;
					STARSDefUseIter DecrementDEF = DecrementInst->GetFirstNonFlagsDef();
					CurrentLoopComparisonExpr.Operand1 = (*DecrementDEF);
					STARSOpndTypePtr ZeroOp = DecrementInst->MakeImmediateOpnd(0);
					DefOrUse ZeroUse(ZeroOp);
					CurrentLoopComparisonExpr.Operand2 = ZeroUse;
					this->LoopComparisonExprs.push_back(CurrentLoopComparisonExpr);
				}
				else {
					SMP_msg("ERROR: LOOP: Decrement with BranchOperator %d in test block %d DecrementAddr %llx loop %u in func %s at %llx\n",
						BranchOperator, TestBlockNum, (uint64_t) DecrementAddr, LoopIndex, this->GetFuncName(), (uint64_t) this->GetFirstFuncAddr());
					this->LoopComparisonExprs.push_back(CurrentLoopComparisonExpr);
					this->LoopAnalysisProblems[LoopIndex] = true;
					continue;
				}
			}
			else {
				SMP_msg("ERROR: LOOP: Could not find compare or test in test block %d loop %zu in func %s at %llx\n",
					TestBlockNum, LoopIndex, this->GetFuncName(), (uint64_t) this->GetFirstFuncAddr());
				this->LoopComparisonExprs.push_back(CurrentLoopComparisonExpr);
				this->LoopAnalysisProblems[LoopIndex] = true;
				continue;
			}
		}
		else {
			SMP_msg("ERROR: LOOP: Could not do iteration analysis on loop of type %d loop %zu in func %s at %llx\n",
				LoopType, LoopIndex, this->GetFuncName(), (uint64_t) this->GetFirstFuncAddr());
			this->LoopComparisonExprs.push_back(CurrentLoopComparisonExpr);
			this->LoopAnalysisProblems[LoopIndex] = true;
			continue; // cannot analyze loop iterations
		}

		if (LoopIndex < this->LoopInductionVars.size()) {
			SMP_msg("INFO: LOOP: Analyzing loop iteration exprs for function %s loop %zu\n", this->GetFuncName(), LoopIndex);
			for (STARSInductionVarFamilyIter IVarVecIter = this->LoopInductionVars[LoopIndex].begin();
				IVarVecIter != this->LoopInductionVars[LoopIndex].end();
			++IVarVecIter) {

				int OutsideDefSSANum = (*IVarVecIter).BIVIncomingSSANum;
				if (OutsideDefSSANum != SMP_SSA_UNINIT) {
					// We have a valid basic induction variable.
					STARSOpndTypePtr BIVDefOp = (*IVarVecIter).BasicInductionVar.InductionVar.GetOp();
					// Normalize the LoopComparison struct so that the BIVDefOp is the LeftOperand.
					SMPoperator RelationalOperator = CurrentLoopComparisonExpr.CompareOperator;
					bool BIVIsLeftOp = IsEqOpIgnoreBitwidth(CurrentLoopComparisonExpr.Operand1.GetOp(), BIVDefOp);
					bool BIVIsRightOp = false;
					if (!BIVIsLeftOp)
						BIVIsRightOp = IsEqOpIgnoreBitwidth(CurrentLoopComparisonExpr.Operand2.GetOp(), BIVDefOp);
					if (BIVIsRightOp) { // invert comparison and swap operands
						CurrentLoopComparisonExpr.CompareOperator = InvertRelationalOperator(RelationalOperator);
						RelationalOperator = CurrentLoopComparisonExpr.CompareOperator;
						CurrentLoopComparisonExpr.Operand2 = CurrentLoopComparisonExpr.Operand1;
						CurrentLoopComparisonExpr.Operand1 = this->LoopComparisonExprs.back().Operand2;
						this->LoopComparisonExprs.pop_back();
						this->LoopComparisonExprs.push_back(CurrentLoopComparisonExpr);
					}
					// assert(BIVIsLeftOp || BIVIsRightOp); // not true for BIV decrement or increment in lieu of a compare opcode
					if (!(BIVIsLeftOp || BIVIsRightOp))
						continue;
					bool PositiveIncrement = IsPositiveIncrementBIV((*IVarVecIter).BasicInductionVar);

					//  First, we need to find a loop-invariant initializer for the incoming value.
					STARS_ea_t OutsideDefAddr = (*IVarVecIter).BIVIncomingDefAddr;
					assert(STARS_BADADDR != OutsideDefAddr);
					// if (!(STARS_IsBlockNumPseudoID(OutsideDefAddr) || STARS_IsSSAMarkerPseudoID(OutsideDefAddr))) {
					if (!(STARS_IsBlockNumPseudoID(OutsideDefAddr))) {
						SMPInstr *OutsideDefInst = this->GetInstFromAddr(OutsideDefAddr);
						assert(nullptr != OutsideDefInst);
						STARSExpression *InitExpr = OutsideDefInst->CreateDefExpr(BIVDefOp);
						// Replace constants and simplify the init expression.
						InitExpr->EvaluateConsts();
						bool ParentChanged = InitExpr->SimplifyDriver();
						bool StoppedOnIV = false;
						bool changed = false;
						set<int> LoopRegHashes;
						set<STARS_ea_t> InitStackPtrCopySet;
						set<STARS_ea_t> LimitStackPtrCopySet;
						int DepthCounter = 0;
						bool Expanded = InitExpr->ExpandExpr(InitExpr->GetParentInst()->GetAddr(), LoopIndex, false, false, false, false, true, LoopRegHashes, StoppedOnIV, changed, InitStackPtrCopySet, DepthCounter);
						if (Expanded) {
							assert(!StoppedOnIV);
							InitExpr->EvaluateConsts();
							ParentChanged = InitExpr->SimplifyDriver();
							if (VerboseOutput) {
								SMP_msg("INFO: LOOP: Init EXPR:");
								InitExpr->Dump(0);
							}

							// Next, we need an expr for the loop limit test.
							STARSExpression *LimitExpr = this->CreateLimitExpr(LoopIndex, *IVarVecIter, CurrentLoopComparisonExpr);
							if (nullptr != LimitExpr) {
								LimitExpr->EvaluateConsts();
								ParentChanged = LimitExpr->SimplifyDriver();
								DepthCounter = 0;
								Expanded = LimitExpr->ExpandExpr(LimitExpr->GetParentInst()->GetAddr(), LoopIndex, true, false, false, false, false, LoopRegHashes, StoppedOnIV, changed, LimitStackPtrCopySet, DepthCounter);
								if (Expanded) {
									assert(!StoppedOnIV);
									LimitExpr->EvaluateConsts();
									ParentChanged = LimitExpr->SimplifyDriver();
									if (VerboseOutput) {
										SMP_msg("INFO: LOOP: Limit EXPR:");
										LimitExpr->Dump(0);
									}

									// Next, create the iterations count expr.
									STARS_sval_t LimitIncrease;
									STARSExpression *IterationCountExpr = this->CreateIterationsExpr(LoopIndex, *IVarVecIter, PositiveIncrement, InitExpr->Clone(), LimitExpr->Clone());

									if (nullptr != IterationCountExpr) {
										// If we have taken the address of a global as the LimitExpr,
										//  and the Limit value is a sentinel and memory is not therefore
										//  accessed on an iteration with the BIV == the Limit value, then
										//  there is a potential problem in the move_globals transformation.
										//  We could use the address of global var2 as a sentinel when looping
										//  through global var1. If we move global var1 away from global var2, then
										//  we need to adjust the limit value to follow it and not remain
										//  pointing at global var2.
										if (!this->LoopExecutesWithLimitValue[LoopIndex]) {
											// Limit value is not used. Might be the sentinel case.
											if (1 == LimitStackPtrCopySet.size()) { // simple case
												STARSOpndTypePtr AddendOp = (*IVarVecIter).BasicInductionVar.Addend.GetOp();
												assert(AddendOp->IsImmedOp());
												STARS_sval_t IncDecValue = (STARS_sval_t)AddendOp->GetImmedValue();
												// If we have a loop IncDecValue of k, and we start in var1 and an
												//  address in var2 is the sentinel, then the last memory value accessed
												//  will be k bytes below the sentinel. We need move_globals to instrument
												//  the instruction that takes the address of the sentinel location so that
												//  it is logically location_in_var1+k rather than logically being
												//  location_in_var2, i.e. as var1 is moved, the sentinel address computation
												//  will remain location_in_var1+k and make no reference to var2.
												// The annotation we emit will therefore need to record two numbers:
												//  location_in_var1 and k.
												STARS_ea_t CopySetInstAddr = (*LimitStackPtrCopySet.cbegin());
												SMPInstr *CopySetInst = this->GetInstFromAddr(CopySetInstAddr);
												assert(nullptr != CopySetInst);
												STARS_uval_t GlobalAddr;
												if (CopySetInst->IsLoadGlobalStaticDataAddress(GlobalAddr)) {
													STARS_sval_t Var1Loc = (STARS_sval_t)GlobalAddr;
													Var1Loc -= IncDecValue;
													// Emit annotation with Var1Loc and IncDecValue for CopySetInstAddr.
													SMP_fprintf(InfoAnnotFile, "%18llx %6u INSTR SENTINEL BASE %llx OFFSET %lld ZZ\n",
														(uint64_t)CopySetInstAddr, CopySetInst->GetSize(), (uint64_t)Var1Loc, (int64_t)IncDecValue);
												}
											}
										}
										ParentChanged = IterationCountExpr->SimplifyDriver();
										this->LoopIterationsInitExprs[LoopIndex] = InitExpr;
										this->LoopIterationsLimitExprs[LoopIndex] = LimitExpr;
										this->LoopIterationsCountExprs[LoopIndex] = IterationCountExpr;
										SMP_msg("INFO: LOOP: Computed IterationCountExpr for loop %d in function at %llx\n",
											LoopIndex, (uint64_t) this->GetFirstFuncAddr());
										// Save the InitExpr and LimitExpr in case this BIV
										//  is encountered in symbolic ExpandExpr() work later
										//  and we need to substitute the bounds of it.
										(*IVarVecIter).BIVInitExpr = InitExpr;
										(*IVarVecIter).BIVLimitExpr = LimitExpr;
										this->LoopAnalyzedBIVIters[LoopIndex] = IVarVecIter;
										--STARS_LoopIterationExprFailures; // undo top of loop increment
										++STARS_LoopIterationExprSuccesses;
										if (VerboseOutput) {
											SMP_msg("INFO: LOOP: IterationCount EXPR:");
											IterationCountExpr->Dump(0);
										}
										break; // Found the primary BIV for this LoopIndex
									}
									else {
										SMP_msg("ERROR: LOOP: Failure to create IterationCountExpr for loop %zu in function at %llx\n",
											LoopIndex, (uint64_t) this->GetFirstFuncAddr());
										this->LoopAnalysisProblems[LoopIndex] = true;
									}
								}
								else {
									SMP_msg("ERROR: LOOP: Failure on ExpandExpr() for LimitExpr in AnalyzeLoopIterations() for loop %zu in function at %llx\n",
										LoopIndex, (uint64_t) this->GetFirstFuncAddr());
									this->LoopAnalysisProblems[LoopIndex] = true;
								}
							}
							else {
								SMP_msg("ERROR: LOOP: Failure to create LimitExpr for loop %zu in function at %llx\n",
									LoopIndex, (uint64_t) this->GetFirstFuncAddr());
								this->LoopAnalysisProblems[LoopIndex] = true;
							}
						}
						else {
							SMP_msg("ERROR: LOOP: Failure on ExpandExpr() for InitExpr in AnalyzeLoopIterations() for loop %zu in function at %llx\n",
								LoopIndex, (uint64_t) this->GetFirstFuncAddr());
							this->LoopAnalysisProblems[LoopIndex] = true;
						}
					}
				}
			} // end for all IV iters in current LoopIndex
		}
	} // end for LoopIndex: loop 1, iteration count analysis

	// See if we had any fatal errors in analysis.
	for (size_t LoopIndex = 0; LoopIndex < this->GetNumLoops(); ++LoopIndex) {
		if (nullptr == this->LoopIterationsCountExprs[LoopIndex]) {
			this->LoopAnalysisProblems[LoopIndex] = true;
			if (this->DoesLoopWriteMemory(LoopIndex)) {
				SMP_msg("ERROR: LOOP: Failure to create iteration count expr for loop %d in function at %llx\n",
					LoopIndex, (uint64_t) this->GetFirstFuncAddr());
			}
			else {
				SMP_msg("SERIOUS WARNING: LOOP: Failure to create iteration count expr for non-mem-writing loop %d starting at %llx in function at %llx\n",
					LoopIndex, (uint64_t)(this->GetBlockByNum((size_t) this->LoopHeadBlockNumbers[LoopIndex])->GetFirstAddr()), (uint64_t) this->GetFirstFuncAddr());
			}
		}
	}

	// Second loop: Create InitExpr and LimitExpr for every basic
	//  induction var that was not involved in the loop compare-and-branch and was
	//  therefore not processed in the previous loop.
	for (size_t LoopIndex = 0; LoopIndex < this->LoopCount; ++LoopIndex) {
		// bool AttemptAnalysis = (!this->LoopAnalysisProblems[LoopIndex] && (LoopIndex < this->LoopInductionVars.size()));
		bool AttemptAnalysis = (LoopIndex < this->LoopInductionVars.size());
		if (AttemptAnalysis) {
			if (VerboseOutput) {
				SMP_msg("INFO: LOOP: Analyzing secondary BIVs for function %s loop %zu\n", this->GetFuncName(), LoopIndex);
			}
			for (STARSInductionVarFamilyIter IVarVecIter = this->LoopInductionVars[LoopIndex].begin();
				IVarVecIter != this->LoopInductionVars[LoopIndex].end();
				++IVarVecIter) {

				int OutsideDefSSANum = (*IVarVecIter).BIVIncomingSSANum;
				if (OutsideDefSSANum != SMP_SSA_UNINIT) {
					// We have a valid basic induction variable.
					if (nullptr == (*IVarVecIter).BIVLimitExpr) {
						// This BIV was not analyzed in the first loop,
						//  because it is not involved in the loop exit
						//  compare-and-branch. Its limit must therefore be obtained
						//  using the iteration count expr times its stride plus its init value.
						STARSExpression *InitExpr = (*IVarVecIter).BIVInitExpr;
						bool StoppedOnIV = false;
						set<int> LoopRegHashes;
						if (nullptr == InitExpr) {
							STARS_ea_t OutsideDefAddr = (*IVarVecIter).BIVIncomingDefAddr;
							assert(STARS_BADADDR != OutsideDefAddr);
							// if (!(STARS_IsBlockNumPseudoID(OutsideDefAddr) || STARS_IsSSAMarkerPseudoID(OutsideDefAddr))) {
							if (!(STARS_IsBlockNumPseudoID(OutsideDefAddr))) {
								SMPInstr *OutsideDefInst = this->GetInstFromAddr(OutsideDefAddr);
								assert(nullptr != OutsideDefInst);
								STARSOpndTypePtr BIVDefOp = (*IVarVecIter).BasicInductionVar.InductionVar.GetOp();
								InitExpr = OutsideDefInst->CreateDefExpr(BIVDefOp);
								// Replace constants and simplify the init expression.
								assert(nullptr != InitExpr);
								InitExpr->EvaluateConsts();
								bool ParentChanged = InitExpr->SimplifyDriver();
								if (VerboseOutput) {
									SMP_msg("INFO: LOOP: Init EXPR for secondary BIV:");
									InitExpr->Dump(0);
								}
								// Record the InitExpr in the IV structure.
								(*IVarVecIter).BIVInitExpr = InitExpr;
							}
							else {
								SMP_msg("ERROR: LOOP: Cannot analyze BIV with OutsideDefAddr of %llx\n", (uint64_t)OutsideDefAddr);
								continue;
							}
						} // end if nullptr == InitExpr

						// Next, we need an expr for the loop limit test.
						STARSExpression *LimitExpr = this->CreateSecondaryBIVLimitExpr(LoopIndex, *IVarVecIter);
						if (nullptr != LimitExpr) {
							LimitExpr->EvaluateConsts();
							bool ParentChanged = LimitExpr->SimplifyDriver();
							if (VerboseOutput) {
								SMP_msg("INFO: LOOP: Limit EXPR for secondary BIV:");
								LimitExpr->Dump(0);
							}
							// Record the LimitExpr in the IV structure.
							(*IVarVecIter).BIVLimitExpr = LimitExpr;
						}
						else {
							SMP_msg("ERROR: LOOP: Failure to create secondary BIV LimitExpr\n");
						}
					}
				}
			} // end for each IVarVecIter in current loop index
		}
	} // end for each loop index: loop 2, secondary BIV range analysis

	// Third loop: Create InitExpr and LimitExpr for all dependent induction vars.
	for (size_t LoopIndex = 0; LoopIndex < this->LoopCount; ++LoopIndex) {
#if 0
		if (this->LoopAnalysisProblems[LoopIndex])
			continue; // analysis failed in the first loop
#endif
		if (VerboseOutput) {
			SMP_msg("INFO: Analyzing DIVs for function %s loop %zu\n", this->GetFuncName(), LoopIndex);
		}
		size_t IVarVecIndex = 0; // for debugging breakpoints
		for (STARSInductionVarFamilyIter IVarVecIter = this->LoopInductionVars[LoopIndex].begin();
			IVarVecIter != this->LoopInductionVars[LoopIndex].end();
			++IVarVecIter, ++IVarVecIndex) {
			int OutsideDefSSANum = (*IVarVecIter).BIVIncomingSSANum;
			if (OutsideDefSSANum != SMP_SSA_UNINIT) {
				// We have a valid basic induction variable.
				//  See if it had InitExpr and LimitExpr analyzed successfully.
				InductionVarFamily IVFamily = (*IVarVecIter);
				if (nullptr == IVFamily.BIVInitExpr) {
					continue;
				}
				// We have a valid and analyzed BIV.
				//  See if it has any dependent induction vars.
				for (size_t DIVIndex = 0; DIVIndex < IVFamily.DependentInductionVars.size(); ++DIVIndex) {
					STARSExpression *DIVInitExpr = this->CreateDIVInitExpr(LoopIndex, IVFamily, DIVIndex);
					bool success = (nullptr != DIVInitExpr);
					if (!success) {
						SMP_msg("ERROR: LOOP: CreateDIVInitExpr failed for loop %zu DIVIndex %zu\n", LoopIndex, DIVIndex);
						continue;
					}
					STARSExpression *DIVLimitExpr = nullptr;
					if (nullptr != IVFamily.BIVLimitExpr) {
						DIVLimitExpr = this->CreateDIVLimitExpr(LoopIndex, IVFamily, DIVIndex);
						success = (nullptr != DIVLimitExpr);
						if (!success) {
							SMP_msg("ERROR: LOOP: CreateDIVLimitExpr failed for loop %zu DIVIndex %zu\n", LoopIndex, DIVIndex);
						}
					}
#if 0
					bool changed = false;
					success = this->ReplaceAllBIVsWithExprs(LoopIndex, DIVInitExpr, true, changed);
					if (!success) {
						SMP_msg("ERROR: LOOP: ReplaceAllBIVsWithExprs on InitExpr failed for loop %zu DIVIndex %zu\n", LoopIndex, DIVIndex);
						continue;
					}
					success = this->ReplaceAllBIVsWithExprs(LoopIndex, DIVLimitExpr, false, changed);
					if (!success) {
						SMP_msg("ERROR: LOOP: ReplaceAllBIVsWithExprs on LimitExpr failed for loop %zu DIVIndex %zu\n", LoopIndex, DIVIndex);
						continue;
					}
#endif
					DIVInitExpr->EvaluateConsts();
					(void) DIVInitExpr->SimplifyDriver();
					if (nullptr != DIVLimitExpr) {
						DIVLimitExpr->EvaluateConsts();
						(void)DIVLimitExpr->SimplifyDriver();
					}
					// Record InitExpr and LimitExpr for DIV.
					IVFamily.DependentInductionVars[DIVIndex].DIVInitExpr = DIVInitExpr;
					IVFamily.DependentInductionVars[DIVIndex].DIVLimitExpr = DIVLimitExpr;
					if (VerboseOutput) {
						SMP_msg("INFO: DIV Init Expr for DIVIndex = %zu\n", DIVIndex);
						DIVInitExpr->Dump(0);
						if (nullptr != DIVLimitExpr) {
							SMP_msg("INFO: DIV Limit Expr for DIVIndex = %zu\n", DIVIndex);
							DIVLimitExpr->Dump(0);
						}
					}
				}
			}
		} // end for all induction var iters
	} // end for all loops: loop 3, DIV range analysis

	// Fourth loop: Create the memory expressions for memory writes and reads in each loop.
	set<STARS_ea_t> StaticMemWriteAddrsEmitted; // annotations emitted; avoid duplication
	set<STARS_ea_t> StaticMemReadAddrsEmitted; // annotations emitted; avoid duplication
	for (size_t LoopIndex = 0; LoopIndex < this->LoopCount; ++LoopIndex) {
			// Try to determine the memory write address range in the loop.
		STARSInductionVarFamilyIter IVarVecIter = this->LoopAnalyzedBIVIters[LoopIndex];
		// bool AttemptAnalysis = (!this->LoopAnalysisProblems[LoopIndex] && (IVarVecIter != this->LoopInductionVars[LoopIndex].end()));
		bool AttemptAnalysis = (IVarVecIter != this->LoopInductionVars[LoopIndex].end());
		if (!AttemptAnalysis)
			continue; // analysis failed in the first loop

		STARSExprSet MemWriteExprs;
		set<int> LoopRegHashes = this->LoopRegHashSets[LoopIndex];
		vector<set<STARS_ea_t> > StackPtrCopiesVector;
		STARSMemWriteExprsList MemWriteExprWidths;
		bool PositiveIncrement = IsPositiveIncrementBIV((*IVarVecIter).BasicInductionVar);
		bool MemRangeSuccess = this->CreateSPARKMemoryWriteRangeExpr(LoopIndex, true, LoopRegHashes, MemWriteExprs, MemWriteExprWidths, StackPtrCopiesVector);
		if (MemRangeSuccess) {
			this->LoopRegHashSets[LoopIndex] = LoopRegHashes;
			SMP_msg("INFO: LOOP: Computed %zu LoopMemWriteExprs for loop %d in function at %llx\n",
				MemWriteExprs.size(), LoopIndex, (uint64_t) this->GetFirstFuncAddr());
			size_t MemListIndex = 0; // for debugging
			for (STARSMemWriteExprListIter MemListIter = MemWriteExprWidths.begin(); MemListIter != MemWriteExprWidths.end(); ++MemListIter, ++MemListIndex) {
				STARSExprSetIter ExprIter = (*MemListIter).first;
				STARSExpression *MemAddressRangeExpr = (*ExprIter); 
				pair<STARSExprSetIter, bool> InsertResult = this->LoopMemWriteRangeExprs[LoopIndex].insert(MemAddressRangeExpr);
				if (VerboseOutput) {
					SMP_msg("INFO: LoopMemWrite EXPR:");
					MemAddressRangeExpr->Dump(0);
					if (MemAddressRangeExpr->IsStackPtrRegUsed()) {
						SMP_msg("INFO: LoopMemWrite EXPR is stack-based.\n");
					}
				}
				// Create the lower and upper bounds of the memory writing range.
				STARSExpression *MemWriteLowerBound = MemAddressRangeExpr->Clone(); // deep copy, for editing
				bool changed = false;
				bool InitSuccess = this->ReplaceAllIVsWithExprs(LoopIndex, MemWriteLowerBound, true, changed);
				bool LimitSuccess = false;
				bool success = InitSuccess; // InitSuccess is mandatory for annotations
				if (success) {
					STARSExpression *MemWriteUpperBound = MemAddressRangeExpr->Clone(); // deep copy, for editing
					LimitSuccess = this->ReplaceAllIVsWithExprs(LoopIndex, MemWriteUpperBound, false, changed);
					if (InitSuccess) { // InitSuccess is mandatory for annotations.
						if (VerboseOutput) {
							SMP_msg("INFO: Successfully replaced IVs with InitExpr for loop %d in function at %llx LimitSuccess: %d\n",
								LoopIndex, (uint64_t) this->GetFirstFuncAddr(), LimitSuccess);
						}
						(void) MemWriteLowerBound->SimplifyDriver();
						if (LimitSuccess) {
							(void) MemWriteUpperBound->SimplifyDriver();
						}
						else {
							// We will emit annotations that have a useful starting address for the
							//  memory range written, and a code that indicates the range limit is unknown.
							delete MemWriteUpperBound;
							MemWriteUpperBound = nullptr;
							this->LoopAnalysisProblems[LoopIndex] = true;
							SMP_msg("INFO: Failure to ReplaceIVsWithLimitExpr for MemListIndex %zu loop %zu in function at %llx\n",
								MemListIndex, LoopIndex, (uint64_t) this->GetFirstFuncAddr());
						}
						if (PositiveIncrement) {
							STARSExprBoundsPair InsertVal(MemWriteLowerBound, MemWriteUpperBound);
							pair<STARSExprBoundsIter, bool> InsertResult = this->LoopMemWriteBoundsExprs[LoopIndex].insert(InsertVal);
						}
						else {
							STARSExprBoundsPair InsertVal(MemWriteUpperBound, MemWriteLowerBound);
							pair<STARSExprBoundsIter, bool> InsertResult = this->LoopMemWriteBoundsExprs[LoopIndex].insert(InsertVal);
						}
						int StackPtrCopiesVecIndex = (*MemListIter).second.second;
						if (VerboseOutput) {
							assert(nullptr != MemWriteLowerBound);
							SMP_msg("INFO: MemWriteLowerBound EXPR:");
							MemWriteLowerBound->Dump(0);
							if (nullptr != MemWriteUpperBound) {
								SMP_msg("INFO: MemWriteUpperBound EXPR:");
								MemWriteUpperBound->Dump(0);
							}
						}

						// Clone the lower and upper bounds expr for inheritance into callers.
						STARSExpression *MemWriteLowerBoundExpanded = MemWriteLowerBound->Clone();
						STARSExpression *MemWriteUpperBoundExpanded = nullptr;
						if (nullptr != MemWriteUpperBound)
							MemWriteUpperBoundExpanded = MemWriteUpperBound->Clone();

						// For inheritance into the callers of this function, we want
						//  to expand the expressions beyond loop boundaries, hopefully
						//  as far as incoming args or constant initializers.
						set<STARS_ea_t> NewStackPtrCopiesSet;
						success = this->ExpandExprToInArgs(LoopIndex, MemWriteLowerBoundExpanded, true, NewStackPtrCopiesSet);
						if (success && LimitSuccess) {
							LimitSuccess = this->ExpandExprToInArgs(LoopIndex, MemWriteUpperBoundExpanded, false, NewStackPtrCopiesSet);
						}
						if (success) {
							set<STARS_ea_t> UnionedStackPtrCopiesSet;
							insert_iterator<set<STARS_ea_t> > EndIter = std::inserter(UnionedStackPtrCopiesSet, UnionedStackPtrCopiesSet.begin());
							if (0 <= StackPtrCopiesVecIndex) { // vector has entry for a set
								EndIter = merge(StackPtrCopiesVector[StackPtrCopiesVecIndex].begin(),
									StackPtrCopiesVector[StackPtrCopiesVecIndex].end(),
									NewStackPtrCopiesSet.begin(), NewStackPtrCopiesSet.end(),
									std::inserter(UnionedStackPtrCopiesSet, UnionedStackPtrCopiesSet.begin()));
							}
							else { // just copy the NewStackPtrCopiesSet
								EndIter = std::copy(NewStackPtrCopiesSet.begin(), NewStackPtrCopiesSet.end(),
									std::inserter(UnionedStackPtrCopiesSet, UnionedStackPtrCopiesSet.begin()));
							}

							// To assist in stack transformation security defenses, if the 
							//  memory range expression is stack-relative, we emit an annotation
							//  that identifies the memory range written to at InstAddr.
							// The annotation should look like:
							//  [hex InstAddr] [InstSize] INSTR STACKMEMRANGE MIN [ESP-k] MAX [ESP-j] ZZ
							set<STARS_ea_t> StaticMemLeaAddrSet;
							bool StackAnnotation = false;
							if (LimitSuccess && MemWriteLowerBoundExpanded->IsStackPtrPlusOffset() && MemWriteUpperBoundExpanded->IsStackPtrPlusOffset()) {
								this->EmitStackMemRangeAnnotations(MemWriteLowerBoundExpanded, MemWriteUpperBoundExpanded, PositiveIncrement, UnionedStackPtrCopiesSet, StaticMemLeaAddrSet);
								StackAnnotation = true;
							}
							if (!StaticMemLeaAddrSet.empty()) {
								SMP_msg("INFO: StaticMemLeaAddrSet size (writes): %zu \n", StaticMemLeaAddrSet.size());
							}

							STARSExprBoundsPair InsertExprPair;
							if (PositiveIncrement) {
								InsertExprPair.first = MemWriteLowerBoundExpanded;
								InsertExprPair.second = MemWriteUpperBoundExpanded;
							}
							else {
								InsertExprPair.first = MemWriteUpperBoundExpanded;
								InsertExprPair.second = MemWriteLowerBoundExpanded;
							}
							pair<STARSExprBoundsIter, bool> InsertResult = this->LoopMemWriteBoundsExprsExpanded[LoopIndex].insert(InsertExprPair);

							// Does the mem write depend on an InArg?
							if (MemWriteLowerBoundExpanded->UsesInArgReg() || (LimitSuccess && MemWriteUpperBoundExpanded->UsesInArgReg())) {
								this->HasLoopInArgMemWrites = true;
							}

							// To assist in defenses related to static global memory, output ranges
							//  of memory written when the Init address has simplified to a constant
							if (MemWriteLowerBoundExpanded->IsConstExpr()) {
								SMPInstr *MemWriteInst = MemWriteLowerBoundExpanded->GetOriginalParentInst();
								STARS_ea_t MemWriteAddr = MemWriteInst->GetAddr();
								if (StaticMemWriteAddrsEmitted.find(MemWriteAddr) == StaticMemWriteAddrsEmitted.cend()) {
									if (LimitSuccess && (MemWriteAddr != MemWriteUpperBoundExpanded->GetOriginalParentInst()->GetAddr())) {
										SMP_msg("ERROR: LOOP: Constant lower and upper bound exprs don't have same InstAddr for MemListIndex %zu loop %zu in func %s.\n",
											MemListIndex, LoopIndex, this->GetFuncName());
									}
									else {
										int InstSize = MemWriteInst->GetSize();
										STARS_uval_t LowerBoundVal = MemWriteLowerBoundExpanded->GetConstLeftOperand()->GetImmedValue();
										STARS_uval_t UpperBoundVal = (STARS_uval_t) 0xfffffffffffffffe; // Code for unknown limit value
										if (LimitSuccess
											&& (nullptr != MemWriteUpperBoundExpanded->GetConstLeftOperand())
											&& (MemWriteUpperBoundExpanded->GetConstLeftOperand()->IsImmedOp())) {
											UpperBoundVal = MemWriteUpperBoundExpanded->GetConstLeftOperand()->GetImmedValue();
										}
										FILE *InfoAnnotFile = global_STARS_program->GetInfoAnnotFile();
										SMP_fprintf(InfoAnnotFile, "%18llx %6zu INSTR STATICMEMWRITE MIN ", (uint64_t)MemWriteAddr, InstSize);
										if (LowerBoundVal == UpperBoundVal) {
											// A simple read from a static mem address with no index registers has
											//  the same expr for lower bound and upper bound. Use the ByteWidth to 
											//  expand the upper bound.
											uint16_t ByteWidth = MemWriteLowerBoundExpanded->GetConstLeftOperand()->GetByteWidth();
											UpperBoundVal += (STARS_uval_t)ByteWidth;
										}
										if (LowerBoundVal < UpperBoundVal) {
											SMP_fprintf(InfoAnnotFile, "%llx LIMIT %llx ZZ %s\n",
												(uint64_t) LowerBoundVal, (uint64_t) UpperBoundVal, MemWriteInst->GetDisasm());
										}
										else {
											SMP_fprintf(InfoAnnotFile, "%llx LIMIT %llx ZZ %s\n",
												(uint64_t) UpperBoundVal, (uint64_t) LowerBoundVal, MemWriteInst->GetDisasm());
										}
										(void) StaticMemWriteAddrsEmitted.insert(MemWriteAddr);
									}
								}
							}
							else if (!StackAnnotation) {
								// LowerBoundExpanded expr was neither static mem nor stack.
								SMP_msg("INFO: LOOP: Expr neither static mem nor stack mem: \n");
								if (VerboseOutput) {
									MemWriteLowerBoundExpanded->Dump(0);
								}
							}
						}
						else {
							SMP_msg("ERROR: LOOP: ExpandExprToInArg failure for MemListIndex %zu loop %zu in function at %llx\n",
								MemListIndex, LoopIndex, (uint64_t) this->GetFirstFuncAddr());
							delete MemWriteLowerBoundExpanded;
							delete MemWriteUpperBoundExpanded;
							this->LoopAnalysisProblems[LoopIndex] = true;
						}
					}
					else {
						SMP_msg("ERROR: LOOP: Failure to ReplaceIVsWithInitExpr (writes) for MemListIndex %zu loop %d in function at %llx\n",
							MemListIndex, LoopIndex, (uint64_t) this->GetFirstFuncAddr());
						delete MemWriteLowerBound;
						if (nullptr != MemWriteUpperBound)
							delete MemWriteUpperBound;
						this->LoopAnalysisProblems[LoopIndex] = true;
					}
				}
				else {
					SMP_msg("ERROR: LOOP: Failure to ReplaceIVsWithInitExpr (writes) for MemListIndex %zu loop %d in function at %llx\n",
						MemListIndex, LoopIndex, (uint64_t) this->GetFirstFuncAddr());
					MemWriteLowerBound->Dump(0);
					delete MemWriteLowerBound;
					this->LoopAnalysisProblems[LoopIndex] = true;
				}
			} // end for all entries in MemWriteExprWidths
		}
		else {
			if (this->LoopWritesMemory[LoopIndex]) {
				SMP_msg("ERROR: LOOP: Failure to CreateSPARKMemRangeExpr for loop %d in function at %llx\n",
					LoopIndex, (uint64_t) this->GetFirstFuncAddr());
				this->LoopAnalysisProblems[LoopIndex] = true;
			}
			else {
				SMP_msg("INFO: LOOP: No memory writes in loop %d in function at %llx\n",
					LoopIndex, (uint64_t) this->GetFirstFuncAddr());
			}
		}

		// Now, do memory read range analysis and produce memory read range annotations.
		STARSExprSet MemReadExprs;
		STARSMemWriteExprsList MemReadExprWidths;
		StackPtrCopiesVector.clear();
		MemRangeSuccess = this->CreateSPARKMemoryReadRangeExprs(LoopIndex, false, LoopRegHashes, MemReadExprs, MemReadExprWidths, StackPtrCopiesVector);
		if (MemRangeSuccess) {
			SMP_msg("INFO: LOOP: Computed %zu LoopMemReadExprs for loop %d in function at %llx\n",
				MemReadExprs.size(), LoopIndex, (uint64_t) this->GetFirstFuncAddr());
			for (STARSMemWriteExprListIter MemListIter = MemReadExprWidths.begin(); MemListIter != MemReadExprWidths.end(); ++MemListIter) {
				STARSExprSetIter ExprIter = (*MemListIter).first;
				STARSExpression *MemAddressRangeExpr = (*ExprIter);
//				pair<STARSExprSetIter, bool> InsertResult = this->LoopMemWriteRangeExprs[LoopIndex].insert(MemAddressRangeExpr);
				if (VerboseOutput) {
					SMP_msg("INFO: LOOP: LoopMemRead EXPR:");
					MemAddressRangeExpr->Dump(0);
					if (MemAddressRangeExpr->IsStackPtrRegUsed()) {
						SMP_msg("INFO: LOOP: LoopMemRead EXPR is stack-based.\n");
					}
				}
				// Create the lower and upper bounds of the memory writing range.
				STARSExpression *MemReadLowerBound = MemAddressRangeExpr->Clone(); // deep copy, for editing
				bool changed = false;
				bool success = this->ReplaceAllIVsWithExprs(LoopIndex, MemReadLowerBound, true, changed);
				if (success) {
					STARSExpression *MemReadUpperBound = MemAddressRangeExpr->Clone(); // deep copy, for editing
					success = this->ReplaceAllIVsWithExprs(LoopIndex, MemReadUpperBound, false, changed);
					if (success) {
						if (VerboseOutput) {
							SMP_msg("INFO: LOOP: Successfully replaced read IVs with InitExpr and LimitExpr for loop %d in function at %llx\n",
								LoopIndex, (uint64_t) this->GetFirstFuncAddr());
						}
						(void) MemReadLowerBound->SimplifyDriver();
						(void) MemReadUpperBound->SimplifyDriver();

						int StackPtrCopiesVecIndex = (*MemListIter).second.second;
						if (VerboseOutput) {
							SMP_msg("INFO: LOOP: MemReadLowerBound EXPR:");
							MemReadLowerBound->Dump(0);
							SMP_msg("INFO: LOOP: MemReadUpperBound EXPR:");
							MemReadUpperBound->Dump(0);
						}

						// Clone the lower and upper bounds expr for inheritance into callers (future SPARK).
						STARSExpression *MemReadLowerBoundExpanded = MemReadLowerBound->Clone();
						STARSExpression *MemReadUpperBoundExpanded = MemReadUpperBound->Clone();

						// For annotation output, we want to expand the expressions beyond loop boundaries,
						//  hopefully as far as incoming args or constant initializers.
						set<STARS_ea_t> NewStackPtrCopiesSet;
						success = this->ExpandExprToInArgs(LoopIndex, MemReadLowerBoundExpanded, true, NewStackPtrCopiesSet);
						if (success) {
							success = this->ExpandExprToInArgs(LoopIndex, MemReadUpperBoundExpanded, false, NewStackPtrCopiesSet);
						}
						if (success) {
							if (VerboseOutput) {
								SMP_msg("INFO: LOOP: MemReadLowerBoundExpanded EXPR:");
								MemReadLowerBoundExpanded->Dump(0);
								SMP_msg("INFO: LOOP: MemReadUpperBoundExpanded EXPR:");
								MemReadUpperBoundExpanded->Dump(0);
							}

							set<STARS_ea_t> UnionedStackPtrCopiesSet;
							insert_iterator<set<STARS_ea_t> > EndIter = std::inserter(UnionedStackPtrCopiesSet, UnionedStackPtrCopiesSet.begin());
							if (0 <= StackPtrCopiesVecIndex) { // vector has entry for a set
								EndIter = merge(StackPtrCopiesVector[StackPtrCopiesVecIndex].begin(),
									StackPtrCopiesVector[StackPtrCopiesVecIndex].end(),
									NewStackPtrCopiesSet.begin(), NewStackPtrCopiesSet.end(),
									std::inserter(UnionedStackPtrCopiesSet, UnionedStackPtrCopiesSet.begin()));
							}
							else { // just copy the NewStackPtrCopiesSet
								EndIter = std::copy(NewStackPtrCopiesSet.begin(), NewStackPtrCopiesSet.end(),
									std::inserter(UnionedStackPtrCopiesSet, UnionedStackPtrCopiesSet.begin()));
							}

							// To assist in stack transformation security defenses, if the 
							//  memory range expression is stack-relative, we emit an annotation
							//  that identifies the memory range written to at InstAddr.
							// The annotation should look like:
							//  [hex InstAddr] [InstSize] INSTR STACKMEMRANGE MIN [ESP-k] MAX [ESP-j] ZZ
							set<STARS_ea_t> StaticMemLeaAddrSet;
							if (MemReadLowerBoundExpanded->IsStackPtrPlusOffset() && MemReadUpperBoundExpanded->IsStackPtrPlusOffset()) {
								this->EmitStackMemRangeAnnotations(MemReadLowerBoundExpanded, MemReadUpperBoundExpanded, PositiveIncrement, UnionedStackPtrCopiesSet, StaticMemLeaAddrSet);
							}
							if (!StaticMemLeaAddrSet.empty()) {
								SMP_msg("INFO: LOOP: StaticMemLeaAddrSet size (reads): %zu \n", StaticMemLeaAddrSet.size());
							}

							// To assist in defenses related to static global memory, output ranges
							//  of memory written when the lower and upper bounds have simplified to
							//  constants.
							if (MemReadLowerBoundExpanded->IsConstExpr() && MemReadUpperBoundExpanded->IsConstExpr()) {
								SMPInstr *MemReadInst = MemReadLowerBoundExpanded->GetOriginalParentInst();
								STARS_ea_t MemReadAddr = MemReadInst->GetAddr();
								if (StaticMemReadAddrsEmitted.find(MemReadAddr) == StaticMemReadAddrsEmitted.cend()) {
									if (MemReadAddr != MemReadUpperBoundExpanded->GetOriginalParentInst()->GetAddr()) {
										SMP_msg("ERROR: LOOP: Constant lower and upper bound exprs don't have same InstAddr for loop %zu in func %s.\n",
											LoopIndex, this->GetFuncName());
									}
									else {
										int InstSize = MemReadInst->GetSize();
										STARS_uval_t LowerBoundVal = MemReadLowerBoundExpanded->GetConstLeftOperand()->GetImmedValue();
										STARS_uval_t UpperBoundVal = MemReadUpperBoundExpanded->GetConstLeftOperand()->GetImmedValue();
										FILE *InfoAnnotFile = global_STARS_program->GetInfoAnnotFile();
										// NOTE: Replace WRITE with READ or RANGE in annotation
										SMP_fprintf(InfoAnnotFile, "%18llx %6zu INSTR STATICMEMWRITE MIN ", (uint64_t) MemReadAddr, InstSize);
										if (LowerBoundVal == UpperBoundVal) {
											// A simple read from a static mem address with no index registers has
											//  the same expr for lower bound and upper bound. Use the ByteWidth to 
											//  expand the upper bound.
											uint16_t ByteWidth = MemReadLowerBoundExpanded->GetConstLeftOperand()->GetByteWidth();
											UpperBoundVal += (STARS_uval_t) ByteWidth;
										}
										if (LowerBoundVal < UpperBoundVal) {
											SMP_fprintf(InfoAnnotFile, "%llx LIMIT %llx ZZ %s\n",
												(uint64_t) LowerBoundVal, (uint64_t) UpperBoundVal, MemReadInst->GetDisasm());
										}
										else {
											SMP_fprintf(InfoAnnotFile, "%llx LIMIT %llx ZZ %s\n",
												(uint64_t) UpperBoundVal, (uint64_t) LowerBoundVal, MemReadInst->GetDisasm());
										}
										(void) StaticMemReadAddrsEmitted.insert(MemReadAddr);
									}
								}
							}
						}
						else {
							SMP_msg("ERROR: LOOP: ExpandExprToInArg failure for loop %zu in function at %llx\n",
								LoopIndex, (uint64_t) this->GetFirstFuncAddr());
							delete MemReadLowerBoundExpanded;
							delete MemReadUpperBoundExpanded;
							// this->LoopAnalysisProblems[LoopIndex] = true;
						}
					}
					else {
						SMP_msg("ERROR: LOOP: Failure to ReplaceIVsWithLimitExpr for loop %zu in function at %llx\n",
							LoopIndex, (uint64_t) this->GetFirstFuncAddr());
						delete MemReadLowerBound;
						delete MemReadUpperBound;
						// this->LoopAnalysisProblems[LoopIndex] = true;
						// this->LoopWritesGlobalStaticMemory[LoopIndex] = false; // reset
					}
				}
				else {
					SMP_msg("ERROR: LOOP: Failure to ReplaceIVsWithInitExpr (reads) for loop %d in function at %llx\n",
						LoopIndex, (uint64_t) this->GetFirstFuncAddr());
					delete MemReadLowerBound;
					// this->LoopAnalysisProblems[LoopIndex] = true;
					// this->LoopWritesGlobalStaticMemory[LoopIndex] = false; // reset
				}
			}
		}
		else {
			if (this->LoopReadsMemory[LoopIndex]) {
				SMP_msg("ERROR: LOOP: Failure to CreateSPARKMemRangeExpr (reads) for loop %d in function at %llx\n",
					LoopIndex, (uint64_t) this->GetFirstFuncAddr());
				// this->LoopAnalysisProblems[LoopIndex] = true;
				// this->LoopWritesGlobalStaticMemory[LoopIndex] = false; // reset
			}
			else {
				SMP_msg("INFO: LOOP: No memory reads in loop %d in function at %llx\n",
					LoopIndex, (uint64_t) this->GetFirstFuncAddr());
			}
		}

	} // end for all loops: loop4, memory range analysis

	// Prepare for later SPARK Ada output.
	this->DetectInArgRegsNeededForMemWriteExprs();
	this->ExpandLoopRegHashExprs();

	return;
} // end of SMPFunction::AnalyzeLoopIterations()

// Helper for SSA subscript renumbering. Hands out the next unused SSA subscript
//  for the global name at GlobNameIndex, advances the per-name counter so the
//  following request gets a fresh number, and pushes the handed-out subscript
//  onto that name's SSAStack so later USEs in the dominator-tree walk see it.
int SMPFunction::SSANewNumber(std::size_t GlobNameIndex) {
	int &NameCounter = this->SSACounter.at(GlobNameIndex); // .at() bounds-checks the index
	const int NewSubscript = NameCounter;
	++NameCounter;
	// Track the highest SSA number issued so far in MaxStackSSANum. We could be
	//  more precise and keep separate register and stack maxima, but this helper
	//  cannot cheaply tell which kind of operand GlobNameIndex refers to, so one
	//  shared maximum covers both.
	if (this->GetMaxStackSSANum() < NameCounter) {
		this->SetMaxStackSSANum(NameCounter);
		assert(NameCounter < 65500);
	}
	this->SSAStack[GlobNameIndex].push_back(NewSubscript);
	return NewSubscript;
} // end of SMPFunction::SSANewNumber()

#define STARS_TRACK_STACK_DEF_ADDRS 1

// Main helper for SSA subscript renumbering. Renumber within block throughout its phi
//  functions, then its DEFs and USEs, then its phi successors. Recurse then on all
//  successors in the dominator tree.
void SMPFunction::SSARename(int BlockNumber) {
	assert(0 <= BlockNumber);
	assert(BlockNumber < this->BlockCount);

	SMPBasicBlock *CurrBlock = this->RPOBlocks.at((std::size_t) BlockNumber);

	bool DumpFlag = false;
#if SMP_DEBUG_DATAFLOW_VERBOSE
	DumpFlag |=	(0 == strcmp("main", this->GetFuncName()));
	DumpFlag |= (0 == strcmp("dohanoi", this->GetFuncName()));
	DumpFlag |= (0 == strcmp("uw_frame_state_for", this->GetFuncName()));
	DumpFlag |= (0 == strcmp("_IO_sputbackc", this->GetFuncName()));
#endif

	if (DumpFlag) SMP_msg("Entered SSARename for block number %d\n", BlockNumber);

	// For each phi function at the top of the block, rename the DEF of the phi function
	//  using SSANewNumber() on the global name index.
	set<SMPPhiFunction, LessPhi>::iterator CurrPhi;
	list<SMPPhiFunction> TempPhiList;
	int GlobalNameIndex;
	for (CurrPhi = CurrBlock->GetFirstPhi(); CurrPhi != CurrBlock->GetLastPhi(); ++CurrPhi) {
		STARSOpndTypePtr PhiDefOp = CurrPhi->GetAnyOp();
		GlobalNameIndex = CurrPhi->GetIndex();
		assert(0 <= GlobalNameIndex);
		int NewSSANum = this->SSANewNumber((std::size_t) GlobalNameIndex);

		// Cannot change the C++ STL set item directly, as sets might become unordered.
		SMPPhiFunction TempPhi = (*CurrPhi);
		TempPhi.SetSSADef(NewSSANum);
		TempPhiList.push_back(TempPhi);

		bool RegOpFlag = PhiDefOp->IsRegOp();
		if (RegOpFlag || MDIsDirectStackAccessOpnd(PhiDefOp, this->UsesFramePointer())) {
			if (DumpFlag && RegOpFlag && PhiDefOp->MatchesReg(STARS_x86_R_ax)) {
				SMP_msg("New EAX Phi Def SSANum: %d Block %d\n", NewSSANum, BlockNumber);
			}
			// Map the final SSA number to the block number.
			if (RegOpFlag) {
				int DefHashValue = HashGlobalNameAndSSA(PhiDefOp, NewSSANum);
				pair<int, STARS_ea_t> DefMapEntry(DefHashValue, (STARS_PSEUDO_ID_MIN + CurrBlock->GetNumber()));
				pair<map<int, STARS_ea_t>::iterator, bool> MapReturnValue = this->GlobalDefAddrBySSA.insert(DefMapEntry);
				assert(MapReturnValue.second);
			}
#if STARS_TRACK_STACK_DEF_ADDRS
			else {
				int64_t DefHashValue = HashGlobalStackNameAndSSA(PhiDefOp, NewSSANum, this->UsesFramePointer());
				pair<int64_t, STARS_ea_t> DefMapEntry(DefHashValue, ((int64_t)(STARS_PSEUDO_ID_MIN + CurrBlock->GetNumber())));
				pair<map<int64_t, STARS_ea_t>::iterator, bool>  MapReturnValue = this->GlobalStackDefAddrBySSA.insert(DefMapEntry);
				assert(MapReturnValue.second);
			}
#endif
		}
	}
	// Go back through the Phi function set and replace the items that need to be updated.
	list<SMPPhiFunction>::iterator TempIter;
	for (TempIter = TempPhiList.begin(); TempIter != TempPhiList.end(); ++TempIter) {
		// Use the op_t from the first phi use, because they are all the same.
		bool Erased = CurrBlock->ErasePhi(TempIter->GetPhiRef(0).GetOp());
		assert(Erased);
		// Now we can add back the phi function that had the DEF SSA number changed.
		bool Added = CurrBlock->AddPhi(*TempIter);
		assert(Added);
	}
	TempPhiList.clear();
	if (DumpFlag) SMP_msg("Processed phi functions at top.\n");

	// For each instruction in the block, rename all global USEs and then all global DEFs.
	vector<SMPInstr *>::iterator InstIter;
	SMPInstr *CurrInst;
	for (InstIter = CurrBlock->GetFirstInst(); InstIter != CurrBlock->GetLastInst(); ++InstIter) {
		CurrInst = (*InstIter);
		set<DefOrUse, LessDefUse>::iterator CurrUse = CurrInst->GetFirstUse();
		STARS_ea_t InstAddr = CurrInst->GetAddr(); // for debugging break points
		while (CurrUse != CurrInst->GetLastUse()) {
			// See if Use is a global name.
			STARSOpndTypePtr UseOp = CurrUse->GetOp();
			STARSOpndSetIter GlobIter = this->GlobalNames.find(UseOp);
			if (GlobIter != this->GlobalNames.end()) { // found it
				unsigned int GlobIndex = ExtractGlobalIndex(*GlobIter);
				if (GlobIndex > this->SSAStack.size()) {
					// Get some debug info out to the log file before we crash.
					SMP_msg("FATAL ERROR: Bad GlobIndex: %u at %llx in %s\n", GlobIndex, (unsigned long long) InstAddr, this->GetFuncName());
					exit(EXIT_FAILURE);
				}
				// Set the SSA number for this use to the top of stack SSA # (back())
				int NewSSANum;
				if (this->SSAStack.at(GlobIndex).empty()) {
					// No top of stack entry to read.
#if SMP_DEBUG_UNINITIALIZED_SSA_NAMES
					if (!CurrInst->MDIsPopInstr() && (UseOp->IsRegOp())) {
						// POP uses the stack offset and generates spurious
						//  uninitialized variable messages for [esp+0].
						SMP_msg("WARNING: function %s : Use of uninitialized variable: ",
							this->GetFuncName());
						SMP_msg(" Variable: ");
						PrintListOperand(*GlobIter);
						SMP_msg(" Block number: %d Address: %llx Instruction: %s\n", BlockNumber,
							(unsigned long long) CurrInst->GetAddr(), CurrInst->GetDisasm());
					}
#endif
					NewSSANum = SMP_SSA_UNINIT;
				}
				else {
					NewSSANum = this->SSAStack.at(GlobIndex).back();
				}
				CurrUse = CurrInst->SetUseSSA(UseOp, NewSSANum);
				if (DumpFlag && (UseOp->IsRegOp()) && UseOp->MatchesReg(STARS_x86_R_ax)) {
					SMP_msg("New EAX Use SSANum: %d at %llx\n", NewSSANum, (unsigned long long) CurrInst->GetAddr());
				}
			}
			++CurrUse;
		} // end for all USEs
		set<DefOrUse, LessDefUse>::iterator CurrDef = CurrInst->GetFirstDef();
		while (CurrDef != CurrInst->GetLastDef()) {
			// See if Def is a global name.
			STARSOpndTypePtr DefOp = CurrDef->GetOp();
			STARSOpndSetIter GlobIter = this->GlobalNames.find(DefOp);
			if (GlobIter != this->GlobalNames.end()) { // found it
				unsigned int GlobIndex = ExtractGlobalIndex(*GlobIter);
				// Set the SSA number for this DEF to the SSANewNumber top of stack
				int NewSSANum = this->SSANewNumber(GlobIndex);
				CurrDef = CurrInst->SetDefSSA(DefOp, NewSSANum);
				bool RegOpFlag = DefOp->IsRegOp();
				if (RegOpFlag || MDIsDirectStackAccessOpnd(DefOp, this->UsesFramePointer())) {
					STARS_ea_t DefAddr = InstAddr;
					if (DumpFlag && RegOpFlag && DefOp->MatchesReg(STARS_x86_R_ax)) {
						SMP_msg("New EAX Def SSANum: %d at %llx\n", NewSSANum, (unsigned long long) DefAddr);
					}

					// Map the final SSA number to the DEF address.
					if (RegOpFlag) {
						int DefHashValue = HashGlobalNameAndSSA(DefOp, NewSSANum);
						pair<int, STARS_ea_t> DefMapEntry(DefHashValue, DefAddr);
						pair<map<int, STARS_ea_t>::iterator, bool> MapReturnValue = this->GlobalDefAddrBySSA.insert(DefMapEntry);
						assert(MapReturnValue.second);
					}
#if STARS_TRACK_STACK_DEF_ADDRS
					else {
						int64_t DefHashValue = HashGlobalStackNameAndSSA(DefOp, NewSSANum, this->UsesFramePointer());
						pair<int64_t, STARS_ea_t> DefMapEntry(DefHashValue, DefAddr);
						pair<map<int64_t, STARS_ea_t>::iterator, bool> MapReturnValue = this->GlobalStackDefAddrBySSA.insert(DefMapEntry);
						assert(MapReturnValue.second);
					}
#endif
				}
			}
			++CurrDef;
		} //  end for all DEFs
	} // end for all instructions
	if (DumpFlag) SMP_msg("Processed all instructions.\n");

	// For all control flow graph (not dominator tree) successors, fill in the current
	//  (outgoing) SSA number in the corresponding USE slot in the phi function, for all
	//  global names appearing in phi functions.
	list<SMPBasicBlock *>::iterator SuccIter;
	for (SuccIter = CurrBlock->GetFirstSucc(); SuccIter != CurrBlock->GetLastSucc(); ++SuccIter) {
		// What position in the Preds list of this successor is CurrBlock?
		int ListPos = (*SuccIter)->GetPredPosition(BlockNumber);
		assert(0 <= ListPos);

		// Go through all phi functions in this successor. At ListPos position in the
		//  incoming arguments for that phi function, set the SSA number to the SSA number
		//  in the top of stack entry for the global name associated with that phi function.
		set<SMPPhiFunction, LessPhi>::iterator CurrPhi;
		for (CurrPhi = (*SuccIter)->GetFirstPhi(); CurrPhi != (*SuccIter)->GetLastPhi(); ++CurrPhi) {
			int GlobIndex = CurrPhi->GetIndex();
			int CurrSSA;
			if (this->SSAStack.at(GlobIndex).empty()) {
				// No top of stack entry to read.
#if SMP_DEBUG_UNINITIALIZED_SSA_NAMES
				SMP_msg("WARNING: function %s : Path to use of uninitialized variable: ",
					this->GetFuncName());
				SMP_msg(" Variable: ");
				PrintListOperand(CurrPhi->GetAnyOp());
				SMP_msg(" Block number: %d Successor block number: %d\n", BlockNumber,
					(*SuccIter)->GetNumber());
#endif
				CurrSSA = SMP_SSA_UNINIT;
			}
			else {
				CurrSSA = this->SSAStack.at(GlobIndex).back();  // fetch from top of stack
			}
			SMPPhiFunction TempPhi = (*CurrPhi);
			TempPhi.SetSSARef(ListPos, CurrSSA);
			TempPhiList.push_back(TempPhi);
			if (DumpFlag && (BlockNumber >= 3) && (BlockNumber <= 4)) {
				SMP_msg("BlockNumber: %d  ListPos: %d\n", BlockNumber, ListPos);
			}
		} // end for all phi functions in successor
		// Go back through the Phi function set and replace the items that need to be updated.
		for (TempIter = TempPhiList.begin(); TempIter != TempPhiList.end(); ++TempIter) {
#if 0
			if (DumpFlag && (BlockNumber >= 3) && (BlockNumber <= 4)) {
				SMP_msg("Special before phi dump:\n");
				set<SMPPhiFunction, LessPhi>::iterator FoundPhi;
				FoundPhi = (*SuccIter)->FindPhi(TempIter->GetAnyOp());
				FoundPhi->Dump();
			}
#endif
			// Use the op_t from the first phi use, because they are all the same.
			bool Erased = (*SuccIter)->ErasePhi(TempIter->GetPhiRef(0).GetOp());
			assert(Erased);
			// Now we can add back the phi function that had one SSA number changed.
			bool Added = (*SuccIter)->AddPhi(*TempIter);
			assert(Added);
			if (DumpFlag && (BlockNumber >= 3) && (BlockNumber <= 4)) {
				SMP_msg("Special after phi dump:\n");
				set<SMPPhiFunction, LessPhi>::iterator FoundPhi;
				FoundPhi = (*SuccIter)->FindPhi(TempIter->GetAnyOp());
				FoundPhi->Dump();
			}
		}
		TempPhiList.clear();
	} // end for all successors of CurrBlock
	if (DumpFlag) SMP_msg("Processed successor phi functions.\n");

	// For each operand in the LiveOut set, create a mapping from operand to SSA number.
	for (STARSOpndSetIter LiveOutIter = CurrBlock->GetFirstLiveOut(); LiveOutIter != CurrBlock->GetLastLiveOut(); ++LiveOutIter) {
		STARSOpndTypePtr LiveOutOp = (*LiveOutIter);
		unsigned int GlobIndex = LiveOutOp->GetOpGlobalIndex();
		int CurrSSA;
		if (this->SSAStack.at(GlobIndex).empty()) {
			// No top of stack entry to read.
			SMP_msg("WARNING: Uninit var, Block: %d GlobIndex: %d Operand:", BlockNumber, GlobIndex);
			PrintOperand(LiveOutOp);
			SMP_msg("\n");
			CurrSSA = 0; // SSA Marker rung will DEF the uninitialized variable
		}
		else {
			CurrSSA = this->SSAStack.at(GlobIndex).back();  // fetch from top of stack
		}
		pair<map<STARSOpndTypePtr, int, STARSLessOp>::iterator, bool> InsertResult = CurrBlock->AddLiveOutSSAMap(LiveOutOp, CurrSSA);
		assert(InsertResult.second);
	}

	// For each successor in the dominator tree, recurse.
	list<int>::iterator ChildIter;
	for (ChildIter = this->DomTree[BlockNumber].second.begin();
		ChildIter != this->DomTree[BlockNumber].second.end();
		++ChildIter) {
			this->SSARename(*ChildIter);
	}
	if (DumpFlag) SMP_msg("Finished recursion.\n");

	// Pop off all SSAStack entries pushed during this block. I.e. for each global name,
	//  pop its SSAStack once per DEF and once per phi function in this block.
	for (CurrPhi = CurrBlock->GetFirstPhi(); CurrPhi != CurrBlock->GetLastPhi(); ++CurrPhi) {
		GlobalNameIndex = CurrPhi->GetIndex();
		this->SSAStack.at((std::size_t) GlobalNameIndex).pop_back();
	}
	if (DumpFlag) SMP_msg("Popped off entries due to phi functions.\n");
	for (InstIter = CurrBlock->GetFirstInst(); InstIter != CurrBlock->GetLastInst(); ++InstIter) {
		set<DefOrUse, LessDefUse>::iterator CurrDef;
		CurrInst = (*InstIter);
		for (CurrDef = CurrInst->GetFirstDef(); CurrDef != CurrInst->GetLastDef(); ++CurrDef) {
			// See if DEF is a global name.
			STARSOpndSetIter GlobIter = this->GlobalNames.find(CurrDef->GetOp());
			if (GlobIter != this->GlobalNames.end()) { // found it
				unsigned int GlobIndex = ExtractGlobalIndex(*GlobIter);
				this->SSAStack.at((std::size_t) GlobIndex).pop_back();
			}
		} //  end for all DEFs
	} // end for all instructions
	if (DumpFlag) { 
		SMP_msg("Popped off entries due to instructions.\n");
	}

	return;
} // end of SMPFunction::SSARename()

// Main driver of SSA subscript renumbering.
void SMPFunction::SSARenumber(void) {
	bool DumpFlag = false;

	// Reset the per-category SSA number high-water marks before renumbering.
	this->SetMaxRegSSANum(0);
	this->SetMaxStackSSANum(0);
	this->MaxLocalSSANum = 0;
#if 0
	DumpFlag |= (0 == strcmp("_IO_sputbackc", this->GetFuncName()));
#endif

	if (this->GlobalNames.empty())
		return;  // no names to renumber

	// Initialize stacks and counters of SSA numbers: one zeroed counter and one
	//  (initially empty) stack of live SSA numbers per global name.
	assert(0 == this->SSACounter.size());
	std::size_t NumGlobalNames = this->GlobalNames.size();
	for (std::size_t NameIndex = 0; NameIndex < NumGlobalNames; ++NameIndex) {
		this->SSACounter.push_back(0);
		this->SSAStack.push_back(list<int>());
	}

	// Recurse through the dominator tree starting with node 0.
	this->SSARename(0);
	if (DumpFlag)
		this->Dump();
	SMP_msg("INFO: Max SSANum used for global names: %d\n", this->GetMaxStackSSANum());
	return;
} // end of SMPFunction::SSARenumber()

// Analyze system calls for memory overwriting safety.
// For each indirect call that is a system call, trace the USE of EAX (which holds
//  the system call number on x86) back to its DEF. The call is considered resolved
//  only if that DEF is a real instruction whose single USE is an immediate value.
//  If the number cannot be resolved, or resolves to a system call that can write
//  memory (e.g. the CGC receive() syscall), mark this function as having
//  unresolved indirect calls, which makes it UNSAFE for overwrite analyses.
void SMPFunction::AnalyzeSystemCalls(void) {
	if (this->HasUnresolvedIndirectCalls())  // already going to be UNSAFE
		return;

	for (size_t BlockIndex = 0; BlockIndex < this->RPOBlocks.size(); ++BlockIndex) {
		SMPBasicBlock *CurrBlock = this->GetBlockByNum(BlockIndex);
		if (CurrBlock->HasCallInstruction()) {
			for (vector<SMPInstr *>::iterator InstIter = CurrBlock->GetFirstInst(); InstIter != CurrBlock->GetLastInst(); ++InstIter) {
				SMPInstr *CurrInst = (*InstIter);
				if ((INDIR_CALL == CurrInst->GetDataFlowType()) && CurrInst->MDIsSystemCall()) {
					STARSOpndTypePtr EAXOpnd = CurrInst->MakeRegOpnd(STARS_x86_R_ax);
					STARSDefUseIter UseIter = CurrInst->FindUse(EAXOpnd);
					if (UseIter != CurrInst->GetLastUse()) {
						// EAX encodes the syscall number. Find its reaching DEF address,
						//  using the global or block-local SSA chain as appropriate.
						bool GlobalName = this->IsGlobalName(EAXOpnd);
						STARS_ea_t EAXDefAddr = STARS_BADADDR;
						if (GlobalName) {
							EAXDefAddr = this->GetGlobalDefAddr(EAXOpnd, UseIter->GetSSANum());
						}
						else {
							EAXDefAddr = CurrBlock->GetDefAddrFromUseAddr(EAXOpnd, CurrInst->GetAddr(), UseIter->GetSSANum(), true);
						}
						// Pseudo-IDs (block number, SSA marker) mean the DEF is not a real instruction.
						bool UnresolvedCallee = ((STARS_BADADDR == EAXDefAddr) || (STARS_IsBlockNumPseudoID(EAXDefAddr))
							|| (STARS_IsSSAMarkerPseudoID(EAXDefAddr)));
						SMP_msg("INFO: System call number def addr at %llx\n", (unsigned long long) EAXDefAddr);
						STARS_uval_t SysCallNumber = 0;
						if (!UnresolvedCallee) {
							// We have an instruction addr in EAXDefAddr.
							SMPInstr *EAXDefInst = this->GetInstFromAddr(EAXDefAddr);
							if (EAXDefInst == NULL)
								UnresolvedCallee = true;

							// Renamed from UseIter so it no longer shadows the outer iterator.
							STARSDefUseIter DefInstUseIter;
							if (!UnresolvedCallee) {
								DefInstUseIter = EAXDefInst->GetFirstUse();
								// check that there's at least 1 use.
								UnresolvedCallee = (DefInstUseIter == EAXDefInst->GetLastUse());
							}
							if (!UnresolvedCallee) {
								SMP_msg("INFO: System call set of EAX.  Use found.  at: %llx\n", (unsigned long long) EAXDefAddr);
								STARSOpndTypePtr UseOp = DefInstUseIter->GetOp();

								// if UseOp is NULL or not an immediate, callee cannot be resolved.
								if (UseOp == NULL)
									UnresolvedCallee = true;
								else
									UnresolvedCallee = (!UseOp->IsImmedOp());

								// Resolve if UseOp is an immediate.
								if (!UnresolvedCallee) {
									SysCallNumber = UseOp->GetImmedValue();
									// BUG FIX: was "%d" with a STARS_uval_t argument, a printf-style
									//  format mismatch (undefined behavior). Cast to unsigned int
									//  for %u, matching the "determined solid" message below.
									SMP_msg("INFO: System call set of EAX.  Use was immed. %u  at: %llx\n", (unsigned int) SysCallNumber, (unsigned long long) EAXDefAddr);
									++DefInstUseIter;

									// check there is not a second use.
									UnresolvedCallee = (DefInstUseIter != EAXDefInst->GetLastUse());
								}
							}
							if (!UnresolvedCallee) {
								SMP_msg("INFO: System call set of EAX determined solid. number: %u at: %llx\n", 
									(unsigned int) SysCallNumber, (unsigned long long) EAXDefAddr);
							}
						}
						if (!UnresolvedCallee) { // we have a good SysCallNumber
#define CGC_RECEIVE_SYSCALL 3
							// receive() writes to memory, so treat it as unresolved (unsafe).
							UnresolvedCallee = ((CGC_RECEIVE_SYSCALL == SysCallNumber) && global_stars_interface->IsCGCBinary());
							// NOTE: Analyze Linux system calls also.
						}
						if (UnresolvedCallee) {
							this->SetHasUnresolvedIndirectCalls();
							return;
						}
					}
				} // end if system call
			} // end for all insts in block
		} // end if block has call instruction
	} // end for all blocks
} // end of SMPFunction::AnalyzeSystemCalls()

// Find variables that are USEd before they are DEFed.
// The SSA marker pseudo-instruction at the top of the function DEFs every name
//  that is live into the function; such names were used before any real DEF,
//  so warn about the suspicious ones (negative-offset stack locals, and on
//  32-bit binaries, non-preserved registers).
void SMPFunction::DetectUninitializedVars(void) {
	if (!this->StackPtrAnalysisSucceeded())
		return;

	bool FramePtrInUse = this->UsesFramePointer();
	bool Is32BitBinary = (global_STARS_program->GetSTARS_ISA_Bitwidth() <= 32);
	// NOTE: WarningsFile is only validated here; the warnings below go to SMP_msg().
	FILE *WarningsFile = global_STARS_program->GetUninitVarFile();
	assert(NULL != WarningsFile);
	// The marker instruction is the first instruction of the function.
	list<SMPInstr *>::iterator InstIter = this->GetFirstInstIter();
	SMPInstr *MarkerInst = (*InstIter);
	assert(MarkerInst->IsMarkerInst());
#if 0   // MarkerInst only has normalized SSA names, never needed normalization
	assert(MarkerInst->AreDefsNormalized());
#endif
	for (STARSDefUseIter DefIter = MarkerInst->GetFirstDef(); DefIter != MarkerInst->GetLastDef(); ++DefIter) {
		STARSOpndTypePtr DefOp = DefIter->GetOp();
		if (MDIsDirectStackAccessOpnd(DefOp, FramePtrInUse)) {
			// Negative offsets are local stack vars; live-in locals are uninitialized.
			int SignedOffset = (int) DefOp->GetAddr();
			if (SignedOffset < 0) {
				SMP_msg("WARNING: Uninitialized stack var, offset %d, func at %llx\n", SignedOffset, (uint64_t) this->GetFirstFuncAddr());
			}
		}
		else if (Is32BitBinary && DefOp->IsRegOp()) {
			// Pushes to save regs that are restored later will create a USE before DEF. Screen those out.
			if (!this->IsRegPreserved((size_t) DefOp->GetReg())) {
				SMP_msg("WARNING: Uninitialized reg, reg # %d, func at %llx\n", DefOp->GetReg(), (uint64_t) this->GetFirstFuncAddr());
			}
		}
	}

	return;
} // end of SMPFunction::DetectUninitializedVars()

// Emit debugging output for analyzing time spent in InferTypes() ?
#define SMP_ANALYZE_INFER_TYPES_TIME 0

// Main driver for the type inference system.
void SMPFunction::InferTypes(bool FirstIter) {
	// The type inference system is an iteration over four analysis steps, until
	//  a fixed point is reached:
	// 1) Within an instruction, set types of operators based on the operator type,
	//     the operand types, and the instruction type category, and propagate the
	//     type of the SMP_ASSIGN operator to its DEF.
	// 2) Propagate the type of a DEF along its SSA chain to all USEs of that SSA name.
	// 3) If all USEs of an SSA name have the same type, but the DEF has no type,
	//     then infer that the DEF must have the same type.
	// 4) If all references to a memory location have the same type, mark that memory
	//     location as having that type, if no aliasing occurs.
	//
	// The type inference system will mark DEFs and USEs in each instruction's DEF and USE
	//  sets with an inferred type. This inference on USEs is not conclusive for other USEs
	//  outside of that instruction. For example, a pointer could be read in from memory
	//  and used as a pointer, then hashed using an arithmetic operation. If the arithmetic
	//  operation always treats its source operands as NUMERIC and produces a NUMERIC
	//  result, e.g. SMP_BITWISE_XOR, then the USE of that pointer is NUMERIC within
	//  this xor instruction. If the DEF at the beginning of the SSA chain for the pointer
	//  is eventually marked as POINTER, then all USEs in the chain will be marked POINTER
	//  as well (see step 2 above). This inconsistency along the USE chain is perfectly
	//  acceptable in our type system. It is important to mark the USEs according to how
	//  we observe them being used, because consistent USEs will propagate back up to
	//  the DEF in step 3 above.
	// @param FirstIter  true on the first call for this function: performs the one-time
	//  seeding of immediate/register types and counter-variable detection, and infers
	//  signedness from SMP types at the end.

	bool changed;       // true if any pass of the do-while changed a type (fixed-point test)
	bool NewChange = false;  // result of the most recent single inference call
#if SMP_ANALYZE_INFER_TYPES_TIME
	bool DebugFlag2 = false;
	DebugFlag2 |= (0 == strcmp("Option", this->GetFuncName()));
	long NewChangeCount;
	long IterationCount = 0;
#endif
#if SMP_DEBUG_TYPE_INFERENCE
	bool DebugFlag = false;
	DebugFlag |= (0 == strcmp("__libc_csu_init", this->GetFuncName()));
#endif
	// Cursors reused by all of the passes below.
	list<SMPInstr *>::iterator InstIter;
	SMPInstr *CurrInst;
	set<DefOrUse, LessDefUse>::iterator CurrDef;
	set<DefOrUse, LessDefUse>::iterator NextDef;
	list<SMPBasicBlock *>::iterator BlockIter;
	SMPBasicBlock *CurrBlock;
	STARS_ea_t InstAddr;

#if SMP_DEBUG_TYPE_INFERENCE
	if (DebugFlag) {
		this->Dump();
	}
#endif
	// One time only: Set the types of immediate values, flags register, stack and frame
	//  pointers, and floating point registers.
	if (FirstIter) {
		for (InstIter = this->Instrs.begin(); InstIter != this->Instrs.end(); ++InstIter) {
			CurrInst = (*InstIter);
#if SMP_DEBUG_TYPE_INFERENCE
			if (DebugFlag) {
				SMP_msg("SetImmedTypes for inst at %x: %s\n", CurrInst->GetAddr(), CurrInst->GetDisasm());
			}
#endif
			CurrInst->SetImmedTypes(this->UseFP);
			// Infer signedness, bit width, and other info from the nature of the instruction
			//  (e.g. loads from stack locations whose signedness has been inferred earlier
			//  in FindOutGoingArgSize(), or inherently signed arithmetic opcodes like signed
			//  or unsigned multiplies and divides).
			if (this->HasGoodFGStackTable()) {
				CurrInst->MDSetWidthSignInfo(this->UseFP);
			}
		} // end for each inst
		// Check for signedness inferences from conditional branches at the end of blocks.
		for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
			CurrBlock = (*BlockIter);
			CurrBlock->MarkBranchSignedness();
			if (global_STARS_program->ShouldSTARSShadowCriticalArgs()) {
				(void) CurrBlock->MarkTaintWarningInArgs();
			}
		}

		// Find counter variables (e.g. init to zero or small constant, then just add or subtract small
		//  constant values. These cannot be POINTER and can be marked as NUMERIC.
		this->FindCounterVariables();
	} // end if (FirstIter)

	// Iterate until no more changes: set types in DEF and USE lists based on RTL
	//  operators and the instruction category, SSA DEF-USE chains, etc.
	do {
#if SMP_ANALYZE_INFER_TYPES_TIME
		if (DebugFlag2)
			++IterationCount;
#endif
#if 0
		do {
#endif
			changed = false;
#if SMP_ANALYZE_INFER_TYPES_TIME
			if (DebugFlag2)
				NewChangeCount = 0;
#endif
			// Step one: Infer types within instructions, context free.
			// Step two, propagating DEF types to all USEs, happens within step one
			//  whenever a DEF type is set for the first time.
			for (InstIter = this->Instrs.begin(); InstIter != this->Instrs.end(); ++InstIter) {
				CurrInst = (*InstIter);
#if SMP_DEBUG_TYPE_INFERENCE
				if (DebugFlag) SMP_msg("Inferring types for %s\n", CurrInst->GetDisasm());
#endif
				NewChange = CurrInst->InferTypes();
				changed = (changed || NewChange);
#if SMP_ANALYZE_INFER_TYPES_TIME
				if (DebugFlag2 && NewChange) {
					// NOTE(review): this local shadows the outer InstAddr and is otherwise
					//  unused; it appears to exist only for debugger inspection.
					STARS_ea_t InstAddr = CurrInst->GetAddr();
					++NewChangeCount;
				}
#endif
			}
#if SMP_ANALYZE_INFER_TYPES_TIME
			if (DebugFlag2) {
				SMP_msg(" InferTypes iteration: %ld NewChangeCount: %ld \n", IterationCount, NewChangeCount);
			}
#endif
#if 0
		} while (changed);
#endif
#if SMP_DEBUG_TYPE_INFERENCE
		if (DebugFlag) SMP_msg("Finished type inference steps 1 and 2.\n");
#endif
		// Step three: If all USEs of an SSA name have the same type, but the DEF has no
		//  type, then infer that the DEF must have the same type.
		// Counters below track convergence statistics across the whole function.
		this->TypedDefs = 0;
		this->UntypedDefs = 0;
		this->TypedPhiDefs = 0;
		this->UntypedPhiDefs = 0;
		// This step of the type inference might converge faster if we used a reverse iterator
		//  to go through the instructions, because we could infer a DEF, propagate it to
		//  the right hand side by making SMPInstr::InferOperatorType() public and calling it
		//  on the SMP_ASSIGN operator after we set the type of the left hand side (DEF). Any
		//  additional DEF inferences would be triggered mostly in the upwards direction by
		//  setting the type of one or more USEs in the current instruction. How much time gain
		//  could be achieved by doing this sequence is questionable.  !!!!****!!!!****
		for (InstIter = this->Instrs.begin(); InstIter != this->Instrs.end(); ++InstIter) {
			CurrInst = (*InstIter);
			InstAddr = CurrInst->GetAddr();
			// Find any DEF that still has type UNINIT.
			CurrDef = CurrInst->GetFirstDef();
			while (CurrDef != CurrInst->GetLastDef()) {
				// Set erase() and insert() are needed to change types of DEFs, so
				//  get hold of the next iterator value now.
				NextDef = CurrDef;
				++NextDef;
				NewChange = false;
				if (UNINIT != CurrDef->GetType()) {
					++(this->TypedDefs);
				}
				else {
					STARSOpndTypePtr DefOp = CurrDef->GetOp();
					bool MemDef = (! DefOp->IsRegOp());
					bool AliasedMemWrite = (MemDef && CurrDef->HasIndirectWrite());
					++(this->UntypedDefs);
					if (MDIsIndirectMemoryOpnd(DefOp, this->UseFP)  // relax this?
#if 0
						|| (DefOp->IsStaticMemOp())
#endif
						|| AliasedMemWrite) {
						// Don't want to infer along DEF-USE chains for indirect
						//  memory accesses until we have alias analysis.
						++CurrDef;
						continue;
					}
					STARS_ea_t DefAddr = InstAddr;
					// Call inference method based on whether it is a block-local
					//  name or a global name.
					CurrBlock = CurrInst->GetBlock();
					if (CurrBlock->IsLocalName(DefOp)) {
						STARSOpndSetIter NameIter;
						NameIter = CurrBlock->FindLocalName(DefOp);
						assert(CurrBlock->GetLastLocalName() != NameIter);
						unsigned int LocIndex = ExtractGlobalIndex(*NameIter);
						NewChange = CurrBlock->InferLocalDefType(DefOp, LocIndex, DefAddr);
						if (NewChange) {
							--(this->UntypedDefs);
							++(this->TypedDefs);
						}
						changed = (changed || NewChange);
					}
					else {
						// global name
						bool CallInst = ((CALL == CurrInst->GetDataFlowType())
							|| (INDIR_CALL == CurrInst->GetDataFlowType()));
						int DefSSANum = CurrDef->GetSSANum();
						SMPOperandType DefType = UNINIT;
						DefType = this->InferGlobalDefType(DefOp,
							DefSSANum, CurrBlock, CallInst, DefAddr);
						if (IsNotEqType(UNINIT, DefType)) {
							// SetDefType() erases and re-inserts, returning a fresh iterator.
							CurrDef = CurrInst->SetDefType(DefOp, DefType);
							--(this->UntypedDefs);
							++(this->TypedDefs);
							NewChange = true;
							// If we have one or more USEs of type POINTER or CODEPTR and the
							//  other USEs are UNINIT, then InferGlobalDefType() will
							//  infer that it is a POINTER/CODEPTR. We want to propagate POINTER/CODEPTR
							//  to all the USEs now.
							if (IsDataPtr(DefType) || IsCodePtr(DefType)) {
								this->ResetProcessedBlocks();
								CurrInst->GetBlock()->PropagateGlobalDefType(DefOp, DefType, DefSSANum, IsMemOperand(DefOp), false);
							}
						}
						changed = (changed || NewChange);
					} // end if local name ... else ...
				} // end if (UNINIT != CurrDef->GetType()) .. else ...
				CurrDef = NextDef;
			} // end while all DEFs in the DEF set
		} // end for all instructions
#if SMP_DEBUG_TYPE_INFERENCE
		if (DebugFlag) SMP_msg("Finished type inference step 3.\n");
#endif

		// Apply step 3 to Phi-function DEFs in every block as well.
		for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
			CurrBlock = (*BlockIter);
			changed |= CurrBlock->InferAllPhiDefTypes();
		}

#if SMP_DEBUG_TYPE_INFERENCE
		if (DebugFlag) SMP_msg("Finished unconditional phi type inference.\n");
#endif

#if SMP_CONDITIONAL_TYPE_PROPAGATION
		// Only attempted once the cheaper inferences have converged.
		if (!changed) { // Try conditional type propagation
			changed |= this->ConditionalTypePropagation();
#if SMP_DEBUG_TYPE_INFERENCE
			if (DebugFlag) {
				SMP_msg("changed = %d after conditional type propagation.\n", changed);
			}
#endif
		}
#endif

	} while (changed);

	// With type inference finished, infer signedness from the types, e.g.
	//  POINTER and CODEPOINTER types must be UNSIGNED.
	if (FirstIter) { // Don't want profiler-dependent signedness in the system yet.
		for (InstIter = this->Instrs.begin(); InstIter != this->Instrs.end(); ++InstIter) {
			(*InstIter)->InferSignednessFromSMPTypes(this->UsesFramePointer());
		}
	}

	// Record the meet of all register types that reach RETURN instructions.
	(void) this->MDFindReturnTypes();
	return;
} // end of SMPFunction::InferTypes()

// Pass types across procedure bounds, return true if types change.
// Pulls incoming argument types into the marker instruction, re-arms call
//  instructions so they can pick up new callee type info, traces critical
//  (tainted) call arguments back to this function's own InArgs, re-runs the
//  intra-procedural type inference, and propagates fine-grained/phi info.
bool SMPFunction::InferInterproceduralTypes(void) {
	this->MDFindIncomingTypes();
	bool change = this->Instrs.front()->InferMarkerInstTypes();

	// See if new type inference can happen.
	// Reset the TypeInferenceComplete flag for call instructions, so they can pick up new callee type info.
	if (!this->IsLeaf()) {
		for (size_t BlockIndex = 0; BlockIndex < this->RPOBlocks.size(); ++BlockIndex) {
			SMPBasicBlock *CurrBlock = this->GetBlockByNum(BlockIndex);
			if (CurrBlock->HasCallInstruction()) {
				for (vector<SMPInstr *>::iterator InstIter = CurrBlock->GetFirstInst(); InstIter != CurrBlock->GetLastInst(); ++InstIter) {
					SMPInstr *CurrInst = (*InstIter);
					SMPitype FlowType = CurrInst->GetDataFlowType();
					if ((CALL == FlowType) || (INDIR_CALL == FlowType)) {
						CurrInst->ResetTypeInferenceComplete();

						if (global_STARS_program->ShouldSTARSShadowCriticalArgs()) {
							// See if we are calling a function with critical InArgs.
							STARS_ea_t CalleeAddr = CurrInst->GetCallTarget();
							SMPFunction *CalleeFunc = nullptr;
							if (STARS_BADADDR != CalleeAddr) {
								CalleeFunc = this->GetProg()->FindFunction(CalleeAddr);
							}
							if (nullptr != CalleeFunc) {
								uint32_t CriticalArgPosBits = CalleeFunc->GetTaintInArgPositions();
								if (0 < CriticalArgPosBits) { // has critical InArgs
									size_t HighestBitPosSet = (size_t) HighestBitSetInUint(CriticalArgPosBits);
									// BUG FIX: loop bound was "< HighestBitPosSet", which skipped
									//  the highest set bit position itself, so the last critical
									//  InArg register was never traced. (Assumes HighestBitSetInUint()
									//  returns a 0-based bit position.) Also use an unsigned literal
									//  in the shift to avoid signed-overflow UB at bit 31.
									for (size_t RegPos = 0; RegPos <= HighestBitPosSet; ++RegPos) {
										if (CriticalArgPosBits & (1U << RegPos)) {
											STARS_regnum_t RegNum = (STARS_regnum_t) RegPos;
											STARSOpndTypePtr RegOp = CurrInst->MakeRegOpnd(RegNum);
											STARSDefUseIter UseIter = CurrInst->FindUse(RegOp);
											assert(UseIter != CurrInst->GetLastUse());
											STARS_ea_t InstAddr = CurrInst->GetAddr();
											int UseSSANum = UseIter->GetSSANum();
											bool LocalName = CurrBlock->IsLocalName(RegOp);
											STARSOpndTypePtr ArgSourceOp = nullptr;
											// Trace the critical arg register back to its ultimate source.
											STARS_ea_t UltimateSourceAddr = CurrBlock->GetUltimateDefAddr(RegOp, InstAddr, UseSSANum, LocalName, true, false, ArgSourceOp);
											if (STARS_IsSSAMarkerPseudoID(UltimateSourceAddr) && (nullptr != ArgSourceOp) && ArgSourceOp->IsRegOp()) {
												size_t InArgPos = 0;
												if (global_STARS_program->GetArgRegPosition(ArgSourceOp->GetReg(), InArgPos)) {
													// Traced back to InArg reg. Is it a new trace?
													if (!this->IsCriticalInArg(InArgPos)) {
														// New discovery.
														this->MarkTaintInArgReg(InArgPos);
														change = true;
													}
												}
											}
										}
									} // end for all RegPos bits
								}
							}
						}
					}
				}
			}
		}
	}
	// Now, re-do type inference over the whole function.
	this->InferTypes(false);

	// Propagate fine-grained info.
	for (list<SMPBasicBlock *>::iterator BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
		SMPBasicBlock *CurrBlock = (*BlockIter);
		CurrBlock->PropagatePhiFGInfo();
	}
#if STARS_AGGRESSIVE_SIGNEDNESS_PROPAGATION
	change |= this->PropagateSignedness();
#endif

	change |= this->MDFindReturnTypes();

	return change;
} // end of SMPFunction::InferInterproceduralTypes()

// determine signedness and width info for all operands
// Iterates instruction-level fine-grained inference and phi propagation
//  until a fixed point is reached.
void SMPFunction::InferFGInfo(void) {
	unsigned short PassCount = 0;
	bool AnyChange;

	do {
		AnyChange = false;
		++PassCount;
		// Pass over every instruction; record whether anything new was inferred.
		for (list<SMPInstr *>::iterator InstIter = this->Instrs.begin(); InstIter != this->Instrs.end(); ++InstIter) {
			if ((*InstIter)->InferFGInfo(PassCount)) {
				AnyChange = true;
			}
		}
		// If instruction-level inference made progress, push it through the phi functions.
		if (AnyChange) {
			for (list<SMPBasicBlock *>::iterator BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
				(*BlockIter)->PropagatePhiFGInfo();
			}
		}
#if STARS_AGGRESSIVE_SIGNEDNESS_PROPAGATION
		// Once converged, try the more aggressive signedness propagation for another round.
		if (!AnyChange) {
			AnyChange = this->PropagateSignedness();
		}
#endif
	} while (AnyChange);

	return;
} // end of SMPFunction::InferFGInfo()

// Apply the profiler information to this function once we've inferred everything we can about it.
// Profiled always-NUMERIC loads update memory load types, and profiled indirect
//  call targets are merged into IndirectCallTargets and AllCallTargets.
void SMPFunction::ApplyProfilerInformation(ProfilerInformation* pi)
{
	assert(pi);

	// If no profiler annotations are available, save time.
	if (0 == pi->GetProfilerAnnotationCount())
		return;

	// Anything derived from profiling is speculative by definition.
	SetIsSpeculative(true);

	bool DebugFlag = false;
#if SMP_DEBUG_PROFILED_TYPE_INFERENCE
	DebugFlag |= (0 == strcmp("dohanoi", this->GetFuncName()));
#endif

	// for each instruction in this function 
	for (list<SMPInstr *>::iterator InstIter = this->Instrs.begin(); InstIter != this->Instrs.end(); ++InstIter) {
		SMPInstr *CurrInst = (*InstIter);
		STARS_ea_t InstAddr = CurrInst->GetAddr();
		// lookup whether a load at this instruction was profiled as always numeric 
		InstructionInformation *InstInfo = pi->GetInfo(InstAddr);
		if ((NULL != InstInfo) && DebugFlag)
			SMP_msg("Found instruction information for %llx\n", (unsigned long long) InstAddr);
		if ((NULL != InstInfo) && InstInfo->isNumeric()) {
#if SMP_DEBUG_PROFILED_TYPE_INFERENCE
			SMP_msg("Found instruction information for %llx and it's numeric!\n", (unsigned long long) InstAddr);
#endif
			CurrInst->UpdateMemLoadTypes((SMPOperandType)(NUMERIC|PROF_BASE));
		}

		// lookup whether this instruction has been profiled as an indirect call
		set<STARS_ea_t> ProfiledTargets = pi->GetIndirectCallTargets(InstAddr);

		for (set<STARS_ea_t>::iterator TargetIter = ProfiledTargets.begin(); TargetIter != ProfiledTargets.end(); ++TargetIter) {
			STARS_ea_t CallTarget = (*TargetIter);
			if (STARS_BADADDR != CallTarget) {
				// Record the target; only extend AllCallTargets on first sighting.
				pair<set<STARS_ea_t>::iterator, bool> InsertResult = this->IndirectCallTargets.insert(CallTarget);
				if (InsertResult.second && (!vector_exists(CallTarget, AllCallTargets))) {
					AllCallTargets.push_back(CallTarget);
				}
			}
		}

	}
	return;
}	// end of SMPFunction::ApplyProfilerInformation

// For the UNINIT type DEF DefOp, see if all its USEs have a single type.
//  If so, set the DEF to that type and return type,
//  else return UNINIT.
// If DefAddr == STARS_BADADDR, then the DEF is in a Phi function, not an instruction.
// @param DefOp    operand whose DEF type is being inferred
// @param SSANum   SSA number of the DEF (must be non-negative)
// @param DefBlock block containing the DEF
// @param CallInst true if the DEF comes from a call instruction (currently unused here)
// @param DefAddr  address of the defining instruction, or STARS_BADADDR for a Phi DEF
// @return the inferred type, or UNINIT if the USEs do not agree on one
SMPOperandType SMPFunction::InferGlobalDefType(const STARSOpndTypePtr &DefOp, int SSANum, SMPBasicBlock *DefBlock, bool CallInst, STARS_ea_t DefAddr) {
	bool DebugFlag = false;
	// Flags accumulating which type categories were seen among the USEs.
	bool FoundNumeric = false;
	bool FoundCodePtr = false;
	bool FoundPointer = false;
	bool FoundUnknown = false;
	bool FoundUninit = false;
	bool FoundDEF;      // have we passed the DEF site yet while scanning the block?
	bool DefEscapes = true;  // does the DEF value live past this block (no re-DEF seen)?

#if SMP_DEBUG_TYPE_INFERENCE
	DebugFlag |= (0 == strcmp("mem_init", this->GetFuncName()));
#endif

	if (DebugFlag) {
		SMP_msg("InferGlobalDefType for SSANum %d of ", SSANum);
		PrintOperand(DefOp);
		SMP_msg("\n");
	}

	vector<SMPInstr *>::iterator InstIter;

	assert(0 <= SSANum);
	set<DefOrUse, LessDefUse>::iterator CurrUse, CurrDef;
	// Go through all instructions in the block and find the instructions
	//  that have USEs of DefOp with SSANum. If all USEs in the chain have
	//  a single type (other than UNINIT), change the DEF type to match the
	//  USE type and set changed to true.
	SMPOperandType UseType = UNINIT;
	SMPOperandType PtrType = UNINIT;  // refined pointer type if data-pointer USEs were seen

	if (STARS_BADADDR == DefAddr) { // DEF is in a Phi function
		// Phi DEFs take effect at the top of the block, so every instruction is "after" the DEF.
		FoundDEF = true;
	}
	else { // DEF is in an instruction
		FoundDEF = false; // need to see the DefAddr first
	}

	for (InstIter = DefBlock->GetFirstInst(); DefEscapes && (InstIter != DefBlock->GetLastInst()); ++InstIter) {
		SMPInstr *CurrInst = (*InstIter);
		if ((!FoundDEF) && (DefAddr == CurrInst->GetAddr())) {
			FoundDEF = true;
		}
		else if (FoundDEF) {
			CurrDef = CurrInst->FindDef(DefOp);
			if (CurrDef != CurrInst->GetLastDef()) {
				// Found re-DEF of DefOp.
				DefEscapes = false;
			}
		}
		// NOTE: Following instructions should be inside if (FoundDEF) condition.
		//  (Author-acknowledged ordering caveat: USEs are examined even before the
		//  DEF site is reached; the SSA-number match below limits the damage,
		//  since only USEs with the matching SSANum contribute to the inference.)
		CurrUse = CurrInst->FindUse(DefOp);
		if (CurrUse != CurrInst->GetLastUse()) { // found a USE of DefOp
			if (CurrUse->GetSSANum() == SSANum) { // matched SSA number
				UseType = CurrUse->GetType();
				// Accumulate which type categories this USE falls into.
				FoundNumeric |= (IsNumeric(UseType));
				FoundCodePtr |= (IsCodePtr(UseType));
				FoundUnknown |= (IsUnknown(UseType));
				FoundUninit |= (IsEqType(UNINIT, UseType));
				if (IsDataPtr(UseType)) {
					if (FoundPointer) {
						if (IsNotEqType(PtrType, UseType)) {
							// Conflicting pointer refinements: fall back to generic POINTER.
#if SMP_DEBUG_TYPE_INFERENCE
							SMP_msg("WARNING: Differing ptr types in global chain:");
							SMP_msg(" Prev: %d Current: %d %s\n", PtrType, UseType,
								CurrInst->GetDisasm());
#endif
							PtrType = POINTER;
						}
					}
					else {
						FoundPointer = true;
						PtrType = UseType;
					}
				}
			} // end if matched SSA #
		} // end if found a USE of DefOp
	} // end for all instructions

	if (DefEscapes) { // did not find re-def
		DefEscapes = DefBlock->IsLiveOut(DefOp);
	}


	if (DefEscapes) { // Need to recurse into successor blocks
		list<SMPBasicBlock *>::iterator SuccIter;
		STARS_ea_t TempAddr;
		this->ResetProcessedBlocks(); // set up recursion
		for (SuccIter = DefBlock->GetFirstSucc(); SuccIter != DefBlock->GetLastSucc(); ++SuccIter) {
			SMPBasicBlock *CurrBlock = (*SuccIter);
			set<SMPPhiFunction, LessPhi>::iterator PhiIter = CurrBlock->FindPhi(DefOp);
			TempAddr = DefAddr;
			if (PhiIter != CurrBlock->GetLastPhi()) {
				TempAddr = STARS_BADADDR;  // signals that DefOp will get re-DEFed in a Phi function.
			}
			else if (STARS_BADADDR == TempAddr) { // was STARS_BADADDR coming in to this function
				// We don't want to pass STARS_BADADDR down the recursion chain, because it will be interpreted
				//  by each successor block to mean that DefOp was a Phi USE that got re-DEFed in a Phi function
				//  within itself. Pass the dummy address that indicates LiveIn to the block.
				TempAddr = STARS_LIVEIN_PSEUDO_ID;
			}

			if (CurrBlock->IsLiveIn(DefOp)) {
				// Successor continues accumulating into the Found* flags by reference.
				CurrBlock->InferGlobalDefType(DefOp, SSANum, TempAddr, FoundNumeric, FoundCodePtr, FoundPointer, FoundUnknown, FoundUninit, PtrType);
			}
		}
	}

	// Do we have a consistent type?
	// If we see any definite POINTER uses, we must set the DEF
	//  to type POINTER or a refinement of it.
	if (FoundPointer)
		UseType = PtrType;
	else if (FoundCodePtr && !FoundUnknown)
		UseType = CODEPTR; // Give CODEPTR priority over NUMERIC and UNINIT
	else if (FoundNumeric && !FoundUninit && !FoundUnknown)
		UseType = NUMERIC;
	else
		return UNINIT; // no POINTER, but no consistent type

	assert(UNINIT != UseType);
	if (DebugFlag) SMP_msg("Inferring global DEF of type %d\n", UseType);

	return UseType;
} // end of SMPFunction::InferGlobalDefType()

// Mark NUMERIC (and propagate) any DEF that starts at small immed. value and gets only small inc/dec operations.
void SMPFunction::FindCounterVariables(void) {
	// We define a counter variable as one that starts out as a small immediate value and then is only updated
	//  via small additions and subtractions. This cannot produce a POINTER, so it must be NUMERIC. This routine
	//  helps get the NUMERIC inference past the phi function barrier, e.g.:
	//
	//  mov eax,0    ; might be NULL POINTER or might be NUMERIC
	//  eax2 := phi(eax1, eax0)  ; still don't know type
	//  label1:    ; top of loop
	//   :
	//  add eax,4  ; increment induction variable; eax1 := eax0 + 4
	//  cmp eax,looplimit
	//  jl label1
	//
	//  Viewed in isolation, adding 4 to EAX could be a pointer operation or a numeric operation, and
	//   the same is true for initializing to zero. Viewed together, these statements obviously cannot be
	//   producing a POINTER value, as 0,4,8, etc. are not a sequence of POINTER values.

	list<SMPInstr *>::iterator InstIter;
	for (InstIter = this->Instrs.begin(); InstIter != this->Instrs.end(); ++InstIter) {
		SMPInstr *CurrInst = (*InstIter);
		bool ValueFound;
		STARS_uval_t ConstValue;
		if (CurrInst->MDIsSimpleAssignment(ValueFound, ConstValue)) {
			if (ValueFound && (0 == ConstValue)) {
				// Start small: Find init to zero, then track it. Init to small values after we test.
				set<DefOrUse, LessDefUse>::iterator DefIter = CurrInst->GetFirstNonFlagsDef();
				if (DefIter == CurrInst->GetLastDef()) {
					// Must have been a simple assignment to a flag, e.g. clc (clear the carry flag).
					continue;
				}
				STARSOpndTypePtr DefOp = DefIter->GetOp();
				if (DefOp->IsRegOp()) {
					list<pair<int, STARS_ea_t> > CounterSSANums;  // SSA numbers that are definitely counters for DefOp
					int DefSSANum = DefIter->GetSSANum();
					STARS_ea_t DefAddr = CurrInst->GetAddr();
					pair<int, STARS_ea_t> ListItem(DefSSANum, DefAddr);
					CounterSSANums.push_back(ListItem);
					SMPBasicBlock *CurrBlock = CurrInst->GetBlock();
					int BlockNum = CurrBlock->GetNumber();
					bool LocalName = CurrBlock->IsLocalName(DefOp);
					if (this->CounterVarHelper(DefOp, DefSSANum, BlockNum, LocalName, CounterSSANums)) {
						while (!CounterSSANums.empty()) {
							int CurrSSANum = CounterSSANums.front().first;
							STARS_ea_t CurrDefAddr = CounterSSANums.front().second;
							bool Propagated;
							if (LocalName) {
								Propagated = CurrBlock->PropagateLocalDefType(DefOp, NUMERIC, CurrDefAddr, CurrSSANum, false);
							}
							else {
								this->ResetProcessedBlocks();
								Propagated = CurrBlock->PropagateGlobalDefType(DefOp, NUMERIC, CurrSSANum, false, false);
							}
							CounterSSANums.pop_front();
						}
					}
				} // end if reg type
			} // end if const value of 0
		} // end if simple assignment
	} // end for all instructions

	return;
} // end of SMPFunction::FindCounterVariables()

// Helper for FindCounterVariables().
// Starting from a counter-style DEF (DefOp/DefSSANum) made in block BlockNum,
//  detect the pattern in which that DEF flows into a single-block loop and is
//  re-defined there only by counter operations (increment/decrement style).
//  Each such re-definition is appended to CounterSSANums as a
//  (SSA number, DEF address) pair.
// Preconditions: CounterSSANums is non-empty; its front entry carries the
//  address of the original pre-loop DEF.
// Returns true if at least one new entry was added to CounterSSANums.
// (Fix: removed the unused local PhiDefSSANum, which was assigned but never read.)
bool SMPFunction::CounterVarHelper(const STARSOpndTypePtr &DefOp, int DefSSANum, int BlockNum, bool LocalName, list<pair<int, STARS_ea_t> > &CounterSSANums) {
	std::size_t IncomingListSize = CounterSSANums.size();
	set<int> NonEscapingRegisterHashes; // scratch arg for DoesDefReachBlockEnd(); no memoization here

	// First, examine the Phi list to find uses of DefOp/DefSSANum.
	// Next, examine instructions to find uses of DefOp/DefSSANum. They must be counter operations if DefOp is re-defed.

	// As a first cut, we will just find the following pattern:
	// 1. Counter-style DEF reaches the end of the current block.
	// 2. Successor block is a single-block loop with counter DEF appearing as a USE in a phi function.
	// 3. Within the single-block loop, Phi DEF is used in a counter-style operation, with new DEF becoming a Phi USE at top of block.
	// We will expand this to loops that are not in a single block later.
	SMPBasicBlock *CurrBlock = this->GetBlockByNum((std::size_t) BlockNum);
	assert(nullptr != CurrBlock);
	STARS_ea_t DefAddr = CounterSSANums.front().second;
	if (CurrBlock->DoesDefReachBlockEnd(DefAddr, DefOp, DefSSANum, NonEscapingRegisterHashes)) {
		NonEscapingRegisterHashes.clear(); // Not memoizing for this use of DoesDefReachBlockEnd()
		bool LoopSuccFound = false;
		list<SMPBasicBlock *>::iterator SuccIter;
		SMPBasicBlock *SuccBlock;
		set<SMPPhiFunction, LessPhi>::iterator PhiIter;
		for (SuccIter = CurrBlock->GetFirstSucc(); SuccIter != CurrBlock->GetLastSucc(); ++SuccIter) {
			SuccBlock = (*SuccIter);
			if (SuccBlock->IsSelfLoop()) {
				PhiIter = SuccBlock->FindPhi(DefOp);
				if (PhiIter != SuccBlock->GetLastPhi()) { // Found a Phi function that could match
					std::size_t PhiSize = PhiIter->GetPhiListSize();
					for (std::size_t index = 0; index < PhiSize; ++index) {
						if (PhiIter->GetUseSSANum(index) == DefSSANum) {
							// Our DEF is a USE in the phi function at the top of SuccBlock. Success.
							//  (The phi's own DEF SSA number is not needed here; see the comment
							//  below about why it is deliberately not pushed onto the list.)
							LoopSuccFound = true;
							break;
						}
					}
				}
				if (LoopSuccFound) {
					break;
				}
			}
		}
		if (LoopSuccFound) {
			// SuccBlock points to a one-block loop with PhiIter pointing to a phi function that has
			//  DefOp/DefSSANum as a phi use. Are the uses of the phi DEF of
			//  DefOp within SuccBlock merely counter redefinitions?
			vector<SMPInstr *>::iterator InstIter;
			for (InstIter = SuccBlock->GetFirstInst(); InstIter != SuccBlock->GetLastInst(); ++InstIter) {
				set<DefOrUse, LessDefUse>::iterator DefIter, UseIter;
				SMPInstr *CurrInst = (*InstIter);
				DefIter = CurrInst->FindDef(DefOp);
				if (DefIter != CurrInst->GetLastDef()) {
					// Found a redefinition of DefOp. Is it just a counter operation that redefines DefOp?
					if (CurrInst->IsCounterOperation()) {
						// We will add the new DEF SSA # to the list of counter SSAs.
						pair<int, STARS_ea_t> CounterPair(DefIter->GetSSANum(), CurrInst->GetAddr());
						CounterSSANums.push_back(CounterPair);
						// We don't need to push the phi DEF SSA number discovered earlier, because if
						//  it follows the simple pattern of only using two counter DEFs as its USEs,
						//  one from before the loop and one from within the loop, then both of its USEs
						//  will get set to NUMERIC and propagation will occur naturally. If it does not
						//  fit this simple pattern, we don't want to force it to be NUMERIC yet.
					}
					else {
						// Problem: we redefined DefOp with a non-counter operation. We want to terminate
						//  the chain of detection of counter variables.
						break;
					}
				}
				else {
					UseIter = CurrInst->FindUse(DefOp);
					if (UseIter != CurrInst->GetLastUse()) {
						// Found USE of DefOp. See if it is a POINTER use, which would
						//  invalidate the hypothesis that DefOp is a counter.
						SMPOperandType UseType = UseIter->GetType();
						if (IsDataPtr(UseType) || IsEqType(UseType, CODEPTR)) {
							// Any apparent counter operations so far have really been pointer arithmetic.
							//  We need to restore the list to its incoming state.
							while (IncomingListSize < CounterSSANums.size()) {
								CounterSSANums.pop_back();
							}
							break; // terminate search
						}
					}
				}
			} // end for all insts in SuccBlock
		} // end if LoopSuccFound
	} // end if original pre-loop DEF reaches the end of its block

	return (CounterSSANums.size() > IncomingListSize);
} // end of SMPFunction::CounterVarHelper()

#define SMP_SIMPLE_CONDITIONAL_TYPE_PROPAGATION 1
#if SMP_SIMPLE_CONDITIONAL_TYPE_PROPAGATION
// The simple form of conditional type propagation observes that we
//  simply need to apply the meet operator over Phi function USEs and
//  then propagate any DEF type changes using PropagateGlobalDefType().
//  The outermost iteration over all type inference methods in InferTypes()
//  will take care of all the propagation that is handled by the work list
//  processing in the textbook algorithm.
// Iteration convergence might be slower in the simple approach, but the code
//  is much simpler to debug.
bool SMPFunction::ConditionalTypePropagation(void) {
	bool changed = false;
	SMPBasicBlock *CurrBlock;
	vector<SMPBasicBlock *>::iterator CurrRPO;
	set<SMPPhiFunction, LessPhi>::iterator CurrPhi;

	for (CurrRPO = this->RPOBlocks.begin(); CurrRPO != this->RPOBlocks.end(); ++CurrRPO) {
		CurrBlock = *CurrRPO;
		SMPOperandType MeetType;
		CurrPhi = CurrBlock->GetFirstPhi();
		while (CurrPhi != CurrBlock->GetLastPhi()) {
			STARSOpndTypePtr DefOp = CurrPhi->GetAnyOp();
			bool IsMemOp = (! DefOp->IsRegOp());
			MeetType = CurrPhi->ConditionalMeetType(CurrBlock);
			CurrPhi = CurrBlock->FindPhi(DefOp); // maybe stale, so re-find; could be changed by propagation in ConditionalMeetType()

			// Here we use a straight equality test, not our macros,
			//  because we consider it a change if the MeetType is
			//  profiler derived and the DEFType is not.
			if (MeetType != CurrPhi->GetDefType()) {
				// Change the DEF type to the MeetType and propagate.
				CurrPhi = CurrBlock->SetPhiDefType(DefOp, MeetType);
				changed = true;
				this->ResetProcessedBlocks();
				changed |= CurrBlock->PropagateGlobalDefType(DefOp,
					MeetType, CurrPhi->GetDefSSANum(), IsMemOp, false);
			}
			++CurrPhi;
		} // end for all phi functions in the current block
	} // end for all blocks

	return changed;
} // end of SMPFunction::ConditionalTypePropagation()

#else  // not SMP_SIMPLE_CONDITIONAL_TYPE_PROPAGATION

// Apply the SCC (Sparse Conditional Constant) propagation algorithm to
//  propagate types starting from unresolved Phi DEFs.
// NOTE(review): this variant is compiled out (SMP_SIMPLE_CONDITIONAL_TYPE_PROPAGATION
//  is defined to 1 above) and appears to have bit-rotted relative to the active
//  variant; see the inline NOTEs below before attempting to re-enable it.
bool SMPFunction::ConditionalTypePropagation(void) {
	bool changed = false;

	// Collections of Phi functions and instructions that have a DEF
	//  with type UNINIT for the current global name.
	map<int, set<SMPPhiFunction, LessPhi>::iterator> UninitDEFPhis;
	// NOTE(review): element type disagrees with what is pushed into it below
	//  (an SMPInstr * from this->Instrs), so this would not compile if enabled.
	vector<list<SMPInstr>::iterator> UninitDEFInsts;

	// Work lists of Phi functions and instructions that need to be processed
	//  according to the SCC algorithm.
	list<map<int, set<SMPPhiFunction, LessPhi>::iterator>::iterator> PhiWorkList;
	list<vector<list<SMPInstr>::iterator>::iterator> InstWorkList;

	// Iterate through all global names that are either (1) registers
	//  or (2) stack locations in SAFE functions.
	STARSOpndSetIter CurrGlob;
	for (CurrGlob = this->GetFirstGlobalName(); CurrGlob != this->GetLastGlobalName(); ++CurrGlob) {
		STARSOpndTypePtr GlobalOp = *CurrGlob;
		// NOTE(review): the active variant above treats RPOBlocks as holding
		//  SMPBasicBlock * values; these list<SMPBasicBlock>::iterator
		//  declarations look stale for the same containers.
		list<SMPBasicBlock>::iterator CurrBlock;
		vector<list<SMPBasicBlock>::iterator>::iterator CurrRPO;
		if (MDIsIndirectMemoryOpnd(GlobalOp, this->UseFP))
			continue; // need alias analysis to process indirect accesses
		if ((! GlobalOp->IsRegOp())
			&& (!((this->GetReturnAddressStatus() == FUNC_SAFE) && MDIsStackAccessOpnd(GlobalOp, this->UseFP))))
			continue; // not register, not safe stack access

		// Set up a map (indexed by SSANum) of iterators to Phi functions
		//  for the current global name that have UNINIT as the Phi DEF type.
		UninitDEFPhis.clear();
		UninitDEFInsts.clear();
		for (CurrRPO = this->RPOBlocks.begin(); CurrRPO != this->RPOBlocks.end(); ++CurrRPO) {
			CurrBlock = *CurrRPO;
			set<SMPPhiFunction, LessPhi>::iterator CurrPhi;
			CurrPhi = CurrBlock->FindPhi(GlobalOp);
			if (CurrPhi != CurrBlock->GetLastPhi()) {
				// Found Phi function for current global name.
				if (IsEqType(CurrPhi->GetDefType(), UNINIT)) {
					// Phi DEF is UNINIT; add Phi to the map.
					pair<int, set<SMPPhiFunction, LessPhi>::iterator> TempPair(CurrPhi->GetDefSSANum(), CurrPhi);
					bool Inserted = false;
					map<int, set<SMPPhiFunction, LessPhi>::iterator>::iterator WhereIns;
					// NOTE(review): Result is copy-constructed from the uninitialized
					//  WhereIns iterator before being overwritten by the insert()
					//  result on the next line; harmless in effect, but it would be
					//  flagged by static analyzers.
					pair<map<int, set<SMPPhiFunction, LessPhi>::iterator>::iterator, bool> Result(WhereIns, Inserted);
					Result = UninitDEFPhis.insert(TempPair);
					assert(Result.second == true);
				}
			}
		} // end for all blocks

		// If any Phi DEF had UNINIT as its type, set up a vector of
		//  iterators to instructions that have UNINIT as the DEF type
		//  for the current global name.
		if (UninitDEFPhis.empty())
			continue;
		list<SMPInstr *>::iterator InstIter;
		for (InstIter = this->Instrs.begin(); InstIter != this->Instrs.end(); ++InstIter) {
			SMPInstr *CurrInst = (*InstIter);
			set<DefOrUse, LessDefUse>::iterator CurrDef = CurrInst->FindDef(GlobalOp);
			if (CurrDef != CurrInst->GetLastDef()) {
				// Found DEF of current global name.
				if (IsEqType(UNINIT, CurrDef->GetType())) {
					UninitDEFInsts.push_back(CurrInst);
				}
			}
		} // end for all instructions

		// Put all UNINIT Phi DEFs that have at least one USE
		//  that is not UNINIT onto the PhiWorkList.
		map<int, set<SMPPhiFunction, LessPhi>::iterator>::iterator CurrUnPhi;
		for (CurrUnPhi = UninitDEFPhis.begin(); CurrUnPhi != UninitDEFPhis.end(); ++CurrUnPhi) {
			pair<int, set<SMPPhiFunction, LessPhi>::iterator> PhiDefPair(*CurrUnPhi);
			if (PhiDefPair.second->HasTypedUses()) {
				PhiWorkList.push_back(CurrUnPhi);
			}
		}

		// Iterate until both work lists are empty:
		while (!(PhiWorkList.empty() && InstWorkList.empty())) {
			// Process Phi items first.
			while (!PhiWorkList.empty()) {
				// If applying the meet operator over the Phi USE types
				//  would produce a new DEF type, change the DEF type and
				//  propagate it, adding Phi functions and instructions that
				//  received the propagated type to their respective work lists.
				map<int, set<SMPPhiFunction, LessPhi>::iterator>::iterator MapIter;
				MapIter = PhiWorkList.front();
				PhiWorkList.pop_front();  // remove from work list
				pair<int, set<SMPPhiFunction, LessPhi>::iterator> PhiDefPair;
				PhiDefPair.first = MapIter->first;
				PhiDefPair.second = MapIter->second;
				set<SMPPhiFunction, LessPhi>::iterator CurrPhi = PhiDefPair.second;
				// NOTE(review): the active variant passes the containing block to
				//  ConditionalMeetType(); this zero-argument call looks stale.
				SMPOperandType MeetType = CurrPhi->ConditionalMeetType();
				// Here we use a straight equality test, not our macros,
				//  because we consider it a change if the MeetType is
				//  profiler derived and the DEFType is not.
				if (MeetType == CurrPhi->GetDefType())
					continue;
				// At this point, we need to set the DEFType to the MeetType
				//  and propagate the change. We have a map of all the
				//  critical Phi functions for this global name, as well
				//  as a vector of the relevant instructions for this name.
				CurrPhi->SetDefType(MeetType);
				changed = true;
				int DefSSANum = CurrPhi->GetDefSSANum();
				map<int, set<SMPPhiFunction, LessPhi>::iterator>::iterator PhiIter;
				vector<list<SMPInstr>::iterator>::iterator InstIter; // NOTE(review): shadows the earlier InstIter and is unused here
				// Propagate to Phi functions first.
				for (PhiIter = UninitDEFPhis.begin(); PhiIter != UninitDEFPhis.end(); ++PhiIter) {
					if (DefSSANum == PhiIter->first)
						continue;  // Skip the Phi that we just changed
					for (std::size_t index = 0; index < PhiIter->second->GetPhiListSize(); ++index) {
						if (DefSSANum == PhiIter->second->GetUseSSANum(index)) {
							// Matched SSA # to USE. Propagate new type.
							PhiIter->second->SetRefType(index, MeetType);
							// Add this phi function to the work list.
							PhiWorkList.push_back(PhiIter);
						}
					}
				}
#define SMP_COND_TYPE_PROP_TO_INSTS 0
#if SMP_COND_TYPE_PROP_TO_INSTS
				// Propagate to instructions with uninit DEFs of global name.
				//  The idea is that the instructions that hold up type propagation
				//  are the ones that USE and then DEF the same global name.
				//  For example, "increment EAX" has to know the type of
				//  the USE of EAX in order to set the type of the DEF.
#endif
			} // end while the PhiWorkList is not empty
#if SMP_COND_TYPE_PROP_TO_INSTS
			// The PhiWorkList is empty at this point, so process
			//  instructions on the InstWorkList.
#endif
		} // end while both work lists are not empty

	} // end for all global names
	return changed;
} // end of SMPFunction::ConditionalTypePropagation()
#endif  // end if SMP_SIMPLE_CONDITIONAL_TYPE_PROPAGATION else ...

// Propagate signedness fine-grained info from DEFs to USEs whenever a USE
//  has no signedness info of its own. Returns true if anything changed.
bool SMPFunction::PropagateSignedness(void) {
	bool changed = false;
#if STARS_AGGRESSIVE_SIGNEDNESS_PROPAGATION
	// Pass 1: any global USE entry lacking signedness bits inherits
	//  whatever signedness its corresponding DEF has recorded.
	map<int, struct FineGrainedInfo>::iterator UseFGIter;
	map<int, struct FineGrainedInfo>::iterator DefFGIter;
	for (UseFGIter = this->GlobalUseFGInfoBySSA.begin(); UseFGIter != this->GlobalUseFGInfoBySSA.end(); ++UseFGIter) {
		bool UseHasSignInfo = (0 != (UseFGIter->second.SignMiscInfo & FG_MASK_SIGNEDNESS_BITS));
		if (!UseHasSignInfo) {
			// USE carries no signedness; fetch the DEF's signedness bits, if any.
			int HashValue = UseFGIter->first;
			unsigned short DefSignBits = this->GetDefSignMiscInfo(HashValue);
			DefSignBits &= FG_MASK_SIGNEDNESS_BITS;
			if (0 != DefSignBits) {
				UseFGIter->second.SignMiscInfo |= DefSignBits;
				changed = true;
			}
		}
	}
	// Pass 2: a DEF that carries signedness but has no USE map entry at all
	//  gets a USE entry created with those signedness bits.
	for (DefFGIter = this->GlobalDefFGInfoBySSA.begin(); DefFGIter != this->GlobalDefFGInfoBySSA.end(); ++DefFGIter) {
		unsigned short DefSignBits = DefFGIter->second.SignMiscInfo;
		DefSignBits &= FG_MASK_SIGNEDNESS_BITS;
		if (0 != DefSignBits) {
			int HashValue = DefFGIter->first;
			if (this->GlobalUseFGInfoBySSA.find(HashValue) == this->GlobalUseFGInfoBySSA.end()) {
				// No USE entry exists; create one carrying the DEF's signedness.
				this->UpdateUseSignMiscInfo(HashValue, DefSignBits);
				changed = true;
			}
		}
	}
	// Pass 3: repeat the same propagation for block-local registers.
	for (list<SMPBasicBlock *>::iterator BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
		bool BlockChanged = (*BlockIter)->PropagateDEFSignedness();
		changed = changed || BlockChanged;
	}
#endif
	return changed;
} // end of SMPFunction::PropagateSignedness()

// Record FollowBlockNum as the follow (loop-exit target) block for the loop headed
//  by LoopHeadBlockNum. If a different follow block was already recorded, accept the
//  special case in which the old follow block consists only of an unconditional jump
//  to the new follow block; any other conflict marks the CFG as unstructured.
void SMPFunction::UpdateLoopFollowBlockNum(int LoopHeadBlockNum, int FollowBlockNum) {
	assert(SMP_BLOCKNUM_UNINIT != LoopHeadBlockNum);
	size_t LoopNum = this->FindLoopNumFromHeadBlockNum(LoopHeadBlockNum);
	int OldFollowNum = this->LoopFollowNodes[LoopNum];
	if (OldFollowNum == SMP_BLOCKNUM_UNINIT) {
		// First follow block recorded for this loop.
		this->LoopFollowNodes[LoopNum] = FollowBlockNum;
	}
	else if (OldFollowNum != FollowBlockNum) {
		// Conflicting follow block numbers.
		// If we are just skipping past a block that has no instructions
		//  besides an unconditional jump to the new follow block, then that
		//  is fine. Otherwise, we have an unstructured CFG.
		SMPBasicBlock *OldFollowBlock = this->GetBlockByNum(OldFollowNum);
		SMPBasicBlock *NewFollowBlock = this->GetBlockByNum(FollowBlockNum);
		assert(nullptr != OldFollowBlock);
		assert(nullptr != NewFollowBlock);
		bool OldJumpOnly = OldFollowBlock->IsOnlyDirectJump();
		bool success = false;
		if (OldJumpOnly) {
			list<SMPBasicBlock *>::const_iterator SuccIter = OldFollowBlock->GetFirstConstSucc();
			assert(SuccIter != OldFollowBlock->GetLastConstSucc());
			if (FollowBlockNum == (*SuccIter)->GetNumber()) {
				SMP_msg("INFO: Replaced DirectJumpOnly loop follow block %d with %d for head block %d in %s\n", OldFollowNum,
					FollowBlockNum, LoopHeadBlockNum, this->GetFuncName());
				success = true;
			}
		}
		if (!success) {
			this->HasStructuredCFG = false;
			// Fix: cast LoopNum to int to match the "%d" conversion; passing a size_t
			//  through a varargs "%d" is undefined behavior on LP64 targets.
			SMP_msg("ERROR: SPARK: Conflicting loop follow block nums for loop %d : %d and %d in %s\n",
				(int) LoopNum, OldFollowNum, FollowBlockNum, this->GetFuncName());
		}
	}
	return;
} // end of SMPFunction::UpdateLoopFollowBlockNum()

// Map a loop header block number to its loop index.
// Returns -1 if HeaderBlockNum is not the header block of any loop.
int SMPFunction::GetLoopNumFromHeaderBlockNum(const int HeaderBlockNum) const {
	const size_t NumLoops = this->LoopHeadBlockNumbers.size();
	for (size_t Index = 0; Index < NumLoops; ++Index) {
		if (HeaderBlockNum == this->LoopHeadBlockNumbers[Index]) {
			return (int) Index; // found the loop headed by this block
		}
	}
	return -1; // not a loop header
} // end of SMPFunction::GetLoopNumFromHeaderBlockNum()

// Detect and mark special cases before emitting numeric error and other security annotations.
//  This pass: (1) runs per-block preparatory analyses; (2) classifies loop-related jumps
//  (LOOP_EXIT, INVERTED_LOOP_EXIT, LOOP_BACK, JUMP_INTO_LOOP_TEST) and records loop follow
//  blocks; (3) when translating to SPARK Ada, structures switch statements and conditional
//  statements; and (4) detects tight hashing loops so that their overflowing/underflowing
//  arithmetic can be marked as benign hash operations.
void SMPFunction::MarkSpecialNumericErrorCases(void) {
	list<SMPBasicBlock *>::iterator BlockIter;
	vector<SMPInstr *>::reverse_iterator RevInstIter;
	SMPBasicBlock *CurrBlock;
	SMPInstr *CurrInst;
	// NOTE(review): DebugFlag is computed but never referenced in this function.
	bool DebugFlag = (0 == strcmp("sub_8063BE0", this->GetFuncName()));
	string FuncName(this->GetFuncName());
	bool StartupFunc = IsStartupFuncName(FuncName);

	set<int> NonEscapingRegisterHashes; // memoization optimization: set of register/SSA# hashes that do not reach end of block

	// Special-case preparatory analyses.
	for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
		CurrBlock = (*BlockIter);
		CurrBlock->AnalyzePrepForNumericAnnotations();
	}

	// Per the decompilation dissertation by Cristina Cifuentes, we scan the code in postorder sequence
	//  to find switch statements from innermost nesting outwards. Then we search for loop control flow,
	//  then we search for if-else control flow last. The control flow graph structuring algorithm requires
	//  a definite order because it is not a finite Church-Rosser system. Because we want to identify jumps
	//  to and around the default case that often precedes the switch indirect jump, and we do not want a LOOP_EXIT
	//  to be identified as a JUMP_TO_DEFAULT_CASE (which is not part of the Cifuentes analysis scheme), we 
	//  do the loop analysis before the switch analysis.
	if (this->HasStructuredControlFlow() && this->HasUnresolvedIndirectJumps()) { // redundant ???
		this->HasStructuredCFG = false;
	}

	// Detect loop-related control flow first, then switches, then simple if-else control flow.
	if ((this->LoopCount > 0) && this->HasReducibleControlFlow() && this->HasStructuredControlFlow()) {
		for (size_t BlockIndex = 0; (BlockIndex < (size_t)this->BlockCount) && this->HasStructuredControlFlow(); ++BlockIndex) {
			SMPBasicBlock *CurrBlock = this->GetBlockByNum(BlockIndex); // shadows the outer CurrBlock declared above
			vector<SMPInstr *>::reverse_iterator InstIter = CurrBlock->GetRevInstBegin(); // last instruction in the block
			CurrInst = (*InstIter);
			STARS_ea_t InstAddr = CurrInst->GetAddr();
			SMPitype CurrDataFlowType = CurrInst->GetDataFlowType();

			// Find all conditional and unconditional jumps within the function.
			if ((JUMP == CurrDataFlowType) || (COND_BRANCH == CurrDataFlowType)) {
				bool InvertedExit = false;
				if (CurrInst->IsLoopExitStatement(InvertedExit)) {
					STARS_ea_t JumpTarget = CurrInst->GetJumpTarget();
					if ((STARS_BADADDR == JumpTarget) || (!this->IsInstIDInFunc(JumpTarget))) {
						this->HasStructuredCFG = false;
						SMP_msg("ERROR: SPARK: Bad jump target at %llx in %s\n", (uint64_t) InstAddr, this->GetFuncName());
						break; // no point in continuing if we cannot translate to SPARK Ada
					}

					// Determine the follow block for the loop being exited.
					int FollowBlockNum;
					if (!InvertedExit) {
						// Normal exit: the jump target is the follow block.
						this->SetControlFlowType(InstAddr, LOOP_EXIT);
						SMPBasicBlock *TargetBlock = this->GetBlockFromInstAddr(JumpTarget);
						assert(nullptr != TargetBlock);
						FollowBlockNum = TargetBlock->GetNumber();
						if (TargetBlock->IsOnlyDirectJump()) {
							(*(TargetBlock->GetFirstInst()))->SetSPARKTranslated(); // Will likely be skipped, might produce null inst
							FollowBlockNum = (*(TargetBlock->GetFirstConstSucc()))->GetNumber();
						}
					}
					else {
						// Inverted exit: the fall-through successor is the follow block.
						this->SetControlFlowType(InstAddr, INVERTED_LOOP_EXIT);
						list<SMPBasicBlock *>::const_iterator FallThroughIter = CurrBlock->GetFallThroughSucc();
						assert(FallThroughIter != CurrBlock->GetLastConstSucc());
						SMPBasicBlock *FallThroughBlock = (*FallThroughIter);
						FollowBlockNum = FallThroughBlock->GetNumber();
						if (FallThroughBlock->IsOnlyDirectJump()) {
							(*(FallThroughBlock->GetFirstInst()))->SetSPARKTranslated(); // Will likely be skipped, might produce null inst
							int NewFollowBlockNum = (*(FallThroughBlock->GetFirstConstSucc()))->GetNumber();
							SMP_msg("INFO: Special case of INVERTED_LOOP_EXIT from block %zu to %d to %d in %s\n",
								BlockIndex, FollowBlockNum, NewFollowBlockNum, this->GetFuncName());
							FollowBlockNum = NewFollowBlockNum;
						}
					}

					// Attribute the follow block to the loop that is being exited.
					if (CurrBlock->IsLoopTailBlock()) {
						this->UpdateLoopFollowBlockNum(CurrBlock->GetLoopHeaderNumber(), FollowBlockNum);
					}
					else {
						int InnerLoopNum = this->GetInnermostLoopNum(CurrBlock->GetNumber());
						if (0 <= InnerLoopNum) {
							int HeaderBlockNum = this->LoopHeadBlockNumbers[InnerLoopNum];
							if (0 < HeaderBlockNum) {
								this->UpdateLoopFollowBlockNum(HeaderBlockNum, FollowBlockNum);
							}
						}
					}
				}
				else if (CurrBlock->IsLoopTailBlock()) {
					// We must be looping back to the loop header.
					this->SetControlFlowType(InstAddr, LOOP_BACK);
					if (COND_BRANCH == CurrDataFlowType) {
						int HeaderBlockNum = CurrBlock->GetLoopHeaderNumber();
						if (!CurrBlock->IsDoubleLoopTailBlock()) { // normal case
							list<SMPBasicBlock *>::const_iterator SuccIter = CurrBlock->GetCondOtherSucc(HeaderBlockNum);
							if (SuccIter != CurrBlock->GetLastConstSucc()) {
								int LoopIndex = this->GetLoopNumFromHeaderBlockNum(HeaderBlockNum);
								assert(0 <= LoopIndex);
								int FollowBlockNum = (*SuccIter)->GetNumber();
								// The non-loop successor of a conditional loop-back branch is the follow block.
								if (!this->IsBlockInLoop(FollowBlockNum, (size_t)LoopIndex)) {
									this->UpdateLoopFollowBlockNum(HeaderBlockNum, FollowBlockNum);
								}
							}
							else {
								// Fall-through to next function, probably IDA Pro problem in func ID.
								this->HasStructuredCFG = false;
								SMP_msg("ERROR: SPARK: Fall-through from %llx is not in function %s\n",
									(uint64_t)InstAddr, this->GetFuncName());
							}
						}
						else { // double tail block
							// Block loops back to outer loop header, falls through to inner loop header.
							//  The FollowBlockNum is the block # of the successor of the outer loop header
							//   that does not enter the outer loop, i.e. it avoids the loop. This is the
							//   FollowBlockNum for both loops.
							int OuterHeadBlockNum = CurrBlock->GetOuterLoopHeaderNumberForDoubleTailBlock();
							assert(SMP_BLOCKNUM_UNINIT != OuterHeadBlockNum);
							int LoopIndex = this->GetLoopNumFromHeaderBlockNum(OuterHeadBlockNum);
							assert(0 <= LoopIndex);
							SMPBasicBlock *OuterHeadBlock = this->GetBlockByNum(OuterHeadBlockNum);
							list<SMPBasicBlock *>::const_iterator SuccIter = OuterHeadBlock->GetSuccNotInLoop((size_t) LoopIndex);
							int FollowBlockNum = SMP_BLOCKNUM_UNINIT;
							if (SuccIter != OuterHeadBlock->GetLastConstSucc()) {
								FollowBlockNum = (*SuccIter)->GetNumber();
							}
							// Record the same follow block for both the inner loop (HeaderBlockNum)
							//  and the outer loop (OuterHeadBlockNum).
							this->UpdateLoopFollowBlockNum(HeaderBlockNum, FollowBlockNum);
							this->UpdateLoopFollowBlockNum(OuterHeadBlockNum, FollowBlockNum);
						}
					}
				}
				else if (CurrBlock->IsOptimizedTopLoopTest() && (COND_BRANCH == CurrDataFlowType)) {
					this->SetControlFlowType(InstAddr, LOOP_BACK);
				}
				else if (JUMP == CurrDataFlowType) {
					// Search for jumps into optimized top-testing loops, which are optimized by compilers to
					//  transform the top-test to a bottom-test, and the loop is entered by jumping down to
					//  the bottom-test block, which is both a loop test block and a loop header block. As we
					//  do not reach this point if the jump is a LOOP_BACK or LOOP_EXIT, we only have to guard
					//  against false detection when an if-else jump within the loop happens to go to the loop
					//  test block.
					STARS_ea_t JumpTarget = CurrInst->GetJumpTarget();
					if ((STARS_BADADDR == JumpTarget) || (!this->IsInstIDInFunc(JumpTarget))) {
						SMP_msg("ERROR: SPARK: Bad jump target at %llx in %s\n", (uint64_t) InstAddr, this->GetFuncName());
						this->HasStructuredCFG = false;
						break; // no point in continuing if we cannot translate to SPARK Ada
					}
					else {
						SMPBasicBlock *TargetBlock = this->GetBlockFromInstAddr(JumpTarget);
						int TargetBlockNum = TargetBlock->GetNumber();
						assert((0 <= TargetBlockNum) && (TargetBlockNum < (int) this->RPOBlocks.size()));
						// See if TargetBlockNum is both a test block and a header block for a loop, and get
						//  that loop number if so.
						if (TargetBlock->IsLoopHeaderBlock()) {
							int HeaderBlockNum = TargetBlock->GetNumber();
							assert(0 <= HeaderBlockNum);
							int LoopNum = this->GetLoopNumFromHeaderBlockNum(HeaderBlockNum);
							assert(0 <= LoopNum);  // -1 return value should not happen for a true header block
							if (this->LoopTestBlocksByLoopNum[LoopNum] == TargetBlockNum) { // header & test block at once
								// Now, see if the current instruction is contained within the loop number; if so,
								//  this is not an entry into an optimized top-testing loop, but is simply control flow
								//  within the loop that happens to end up at the tail/test block.
								if (!this->IsBlockInLoop(CurrBlock->GetNumber(), (size_t)LoopNum)) {
									this->SetControlFlowType(InstAddr, JUMP_INTO_LOOP_TEST);
									TargetBlock->SetOptimizedTopLoopTest();
								}
							}
						}
					}
				}
			} // end if JUMP or COND_BRANCH on CurrInst
		} // end for all blocks
	} // end if LoopCount > 0 and structured CFG

	// Switch and if-else structuring is only performed when translating to SPARK Ada.
	if (global_STARS_program->ShouldSTARSTranslateToSPARKAda()) {
		if (!StartupFunc) {
			if (this->HasStructuredControlFlow() && this->HasIndirectJumps()) {
				// Traverse CFG in post-order sequence and find indirect jumps and analyze the switch statements
				for (vector<SMPBasicBlock *>::reverse_iterator BlockIter = this->RPOBlocks.rbegin(); BlockIter != this->RPOBlocks.rend(); ++BlockIter) {
					SMPBasicBlock *CurrBlock = (*BlockIter);
					if (CurrBlock->HasIndirectJump()) {
						bool StructuredSwitch = this->AnalyzeSwitchStatement(CurrBlock);
						if (!StructuredSwitch) {
							this->HasStructuredCFG = false;
							SMP_msg("ERROR: SPARK: Unstructured switch statements in %s\n", this->GetFuncName());
							break; // give up as soon as CFG is unstructured
						}
					}
				}
			}

			// Now we have identified all switch, loop-back and loop-exit jumps. The remaining jumps
			//  must be part of if-then, if-then-else, and if-then-elsif ... constructs.
			if (this->HasStructuredControlFlow()) {
				bool StructuredConditionals = this->AnalyzeConditionalStatements();
				if (!StructuredConditionals) {
					this->HasStructuredCFG = false;
					SMP_msg("ERROR: SPARK: Unstructured COND_BRANCH statements in %s\n", this->GetFuncName());
#if 0
					this->Dump();
#endif
				}
				else if (this->HasStructuredControlFlow()) {
					this->FindGuardedLoops();
				}
			}
		}
	}

	// Hash-loop detection below applies only to functions with loops
	//  or to single-block functions; skip everything else.
	if ((this->LoopCount == 0) && (1 < this->GetNumBlocks())) {
		return;
	}

	// Loop through blocks and detect tight loops of hashing arithmetic.
	//  We also include single-block functions of hashing arithmetic, as sometimes the loop
	//  is unrolled and the entire function is a hash.
#define STARS_MIN_HASH_BLOCK_SIZE 20

	for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
		CurrBlock = (*BlockIter);
		int BlockNum = CurrBlock->GetNumber();
#if 0
		if (CurrBlock->IsLoopTailBlock() && CurrBlock->IsLoopHeaderBlock()) {
#else
		if (this->IsBlockInAnyLoop(BlockNum) || ((1 == this->GetNumBlocks()) && (STARS_MIN_HASH_BLOCK_SIZE <= this->Instrs.size()))
			|| this->FuncHasHashingCode()) {
#endif
			// We have a block in a loop, func with previous hash code, or a one-block function. This is the simple case we want
			//  to start with, as hash functions we have observed are tight loops of arithmetic computations.
			//  The next question is whether we can find the kind of shift/rotate that is common to hashing, plus
			//  at least one addition to the result of the shift/rotate.
			bool ShiftFound = false;
			bool AddFound = false;
			set<DefOrUse, LessDefUse>::iterator DefIter;
			NonEscapingRegisterHashes.clear();

			// Scan the block backwards looking for the shift-then-add hashing pattern.
			for (RevInstIter = CurrBlock->GetRevInstBegin(); RevInstIter != CurrBlock->GetRevInstEnd(); ++RevInstIter) {
				CurrInst = (*RevInstIter);
				if ((!ShiftFound) && CurrInst->MDIsHashingArithmetic()) {
					// If the operand being shifted is never used in any assignment or arithmetic
					//  except as an address register computation within a memory operand, then the
					//  shifted value does not reach the top of the loop and get shifts accumulated.
					//  In that case, we are not dealing with a shift-and-add type of hash function.
					//  So, do not claim success unless the later addition DEF reaches the end
					//  of the block.
					DefIter = CurrInst->GetFirstNonFlagsDef();
					STARS_ea_t DefAddr = CurrInst->GetAddr();
					STARSOpndTypePtr DefOp = DefIter->GetOp();
					STARS_ea_t AdditionAddr = STARS_BADADDR;
					ShiftFound = CurrBlock->IsDefInvolvedInAddition(DefAddr, DefOp, AdditionAddr);
					if (ShiftFound) {
						SMPInstr *AdditionInst = this->GetInstFromAddr(AdditionAddr);
						DefIter = AdditionInst->GetFirstNonFlagsDef();
						STARSOpndTypePtr AddDefOp = DefIter->GetOp();
						AddFound = CurrBlock->DoesDefReachBlockEnd(AdditionAddr, AddDefOp, DefIter->GetSSANum(), NonEscapingRegisterHashes);
						if (AddFound) {
							break;
						}
						else {
							// Reset ShiftFound and look for a different shift.
							ShiftFound = false;
						}
					}
				}
			}
			if (ShiftFound && AddFound) {
				// We found a tight hashing loop. Mark all the overflowing and underflowing opcodes as benign.
				//  NOTE: We could do loop-variant analysis to ensure that the shifted and added values are actually
				//  changing within the loop, but if they are not, they are probably not exploitable overflows anyway,
				//  and the loop-invariant overflow would happen on every loop iteration based on initial values, which
				//  is a pattern we have never seen for this kind of code.
				this->SetHasHashingCode(true);
				vector<SMPInstr *>::iterator ForwardInstIter;
				for (ForwardInstIter = CurrBlock->GetFirstInst(); ForwardInstIter != CurrBlock->GetLastInst(); ++ForwardInstIter) {
					CurrInst = (*ForwardInstIter);
					if (CurrInst->MDIsOverflowingOpcode() || CurrInst->MDIsUnderflowingOpcode() || CurrInst->MDIsLoadEffectiveAddressInstr()) {
						CurrInst->SetHashOperation();
					}
				}
			}
		} // end if loop header and loop tail
	} // end for all blocks

	NonEscapingRegisterHashes.clear();

	return;
} // end of SMPFunction::MarkSpecialNumericErrorCases()

// Use the SSA marker inst to record incoming arg types in member InArgTypes.
//  Side effects: sets InArgCount, MaxInArgIndex, HasInArgCodePointer, HasInArgDataPointer,
//  and (when shadowing func ptrs) fills GlobalNameIndexMapToInArgIndex and InArgIndexMapToOperand.
void SMPFunction::GatherIncomingArgTypes(void) {
	// Go through the DEFs in the SSA marker inst and see which of them are incoming args.
	list<SMPInstr *>::iterator MarkerInstIter = this->Instrs.begin();
	SMPInstr *MarkerInst = (*MarkerInstIter);
	unsigned short ArgCount = 0;
	if (MarkerInst->IsMarkerInst()) {
		std::size_t StandardByteSize = (MD_NORMAL_MACHINE_BITWIDTH / 8);
		STARSBitSet ArgsProcessed; // stack arg slots already seen (32-bit case only)
		ArgsProcessed.AllocateBits(STARS_MAX_ARGS_PER_FUNC);
		for (set<DefOrUse, LessDefUse>::iterator DefIter = MarkerInst->GetFirstDef(); DefIter != MarkerInst->GetLastDef(); ++DefIter) {
			STARSOpndTypePtr DefOp = DefIter->GetOp();
			std::size_t ArgIndex = 0;
			SMPOperandType ArgType = DefIter->GetType();
			// Note whether any incoming arg carries a code or data pointer type.
			if (IsEqType(ArgType, CODEPTR)) {
				this->HasInArgCodePointer = true;
			}
			else if (IsDataPtr(ArgType)) {
				this->HasInArgDataPointer = true;
			}
			if (global_STARS_program->GetSTARS_ISA_Bitwidth() == 64) {
				// 64-bit model: args arrive in registers; arg position is the
				//  register's position in the platform argument-register list.
				if (DefOp->IsRegOp()) {
					STARS_regnum_t RegNum = DefOp->GetReg();
					for (list<STARS_regnum_t>::iterator ArgRegIter = global_STARS_program->GetFirstArgumentReg(); ArgRegIter != global_STARS_program->GetLastArgumentReg(); ++ArgRegIter) {
						if (RegNum == (*ArgRegIter)) { // found an incoming argument
							++ArgCount;
							this->InArgTypes[ArgIndex] = (unsigned short) ArgType;
							if (ArgIndex > this->MaxInArgIndex) {
								this->MaxInArgIndex = ArgIndex;
							}
							if (global_STARS_program->ShouldSTARSShadowFuncPtrs()) {
								STARSOpndSetIter GlobIter = this->GlobalNames.find(DefOp);
								if (GlobIter != this->GlobalNames.end()) {
									unsigned int GlobNameIndex = ExtractGlobalIndex(*GlobIter);
									pair<unsigned int, size_t> InsertValue(GlobNameIndex, ArgIndex);
									pair<map<unsigned int, size_t>::iterator, bool> InsertResult = this->GlobalNameIndexMapToInArgIndex.insert(InsertValue);
									assert(InsertResult.second); // should never be already present

									// Map ArgIndex to operand.
									pair<size_t, STARSOpndTypePtr> ArgMapValue(ArgIndex, DefOp);
									pair<map<size_t, STARSOpndTypePtr>::iterator, bool> ArgMapInsertResult = this->InArgIndexMapToOperand.insert(ArgMapValue);
									assert(ArgMapInsertResult.second); // BUGFIX: was re-checking InsertResult; should never be already present
								}
							}
							break;
						}
						++ArgIndex;
					}
				}
			}
			else { // 32 bits
				// Current model is writing outargs to stack. 
				if (MDIsDirectStackAccessOpnd(DefOp, this->UsesFramePointer())) {
					// Ensure that we are dealing with an incoming arg and not a USE-before-DEF local var.
					int SignedAddr = (int) DefOp->GetAddr();
					if (0 < SignedAddr) {
						ArgIndex = (DefOp->GetAddr() / StandardByteSize) - 1; // -1 to skip over return address
						if (ArgIndex < STARS_MAX_ARGS_PER_FUNC) {
							if (!ArgsProcessed.GetBit(ArgIndex)) {
								++ArgCount;
								ArgsProcessed.SetBit(ArgIndex);
								this->InArgTypes[ArgIndex] = (unsigned short) ArgType;
								if (ArgIndex > this->MaxInArgIndex) {
									this->MaxInArgIndex = ArgIndex;
								}
								if (global_STARS_program->ShouldSTARSShadowFuncPtrs()) {
									STARSOpndSetIter GlobIter = this->GlobalNames.find(DefOp);
									if (GlobIter != this->GlobalNames.end()) {
										unsigned int GlobNameIndex = ExtractGlobalIndex(*GlobIter);
										pair<unsigned int, size_t> InsertValue(GlobNameIndex, ArgIndex);
										pair<map<unsigned int, size_t>::iterator, bool> InsertResult = this->GlobalNameIndexMapToInArgIndex.insert(InsertValue);
										assert(InsertResult.second); // should never be already present

										// Map ArgIndex to operand.
										pair<size_t, STARSOpndTypePtr> ArgMapValue(ArgIndex, DefOp);
										pair<map<size_t, STARSOpndTypePtr>::iterator, bool> ArgMapInsertResult = this->InArgIndexMapToOperand.insert(ArgMapValue);
										assert(ArgMapInsertResult.second); // BUGFIX: was re-checking InsertResult; should never be already present
									}
								}
							}
							else { // Not the first time to see arg; could be processing an arg one byte at a time, for example.
								// Do not increment ArgCount for duplicate processing. MaxInArgIndex does not need updating, either.
								bool TypeErrorFlag = false;
								this->InArgTypes[ArgIndex] = (unsigned short) SMPTypeMeet((SMPOperandType) this->InArgTypes[ArgIndex], ArgType, TypeErrorFlag);
								if (TypeErrorFlag) {
									SMP_msg("ERROR: TypeMeet error in GatherIncomingArgType() for func at %llx\n",
										(uint64_t) this->GetFirstFuncAddr());
								}
							}
						}
						else {
							// BUGFIX: ArgIndex is size_t; passing it to %u is undefined on LP64, so cast explicitly.
							SMP_msg("ERROR: Incoming ArgIndex of %u in SMPFunction::GatherIncomingArgTypes\n", (unsigned int) ArgIndex);
						}
					}
				}
			}
		} // end for all DEFs in marker inst
	} // end if marker inst

	this->InArgCount = ArgCount;

	return;
} // end of SMPFunction::GatherIncomingArgTypes()

// Find all copies of incoming args and record in bitsets
void SMPFunction::TraceIncomingArgs(void) {
	// Start with tracing CODEPTR inargs.
	if (this->HasInArgCodePointer || this->HasInArgDataPointer) {
		// Allocate vector of bitsets to mark copies of InArgs.
		size_t GlobNameCount = this->NumGlobalNames();
		size_t SSALimit = this->GetMaxStackSSANum();
		if (SSALimit < (size_t) this->GetMaxRegSSANum()) {
			SSALimit = (size_t) this->GetMaxRegSSANum();
		}
		for (size_t index = 0; index < GlobNameCount; ++index) {
			STARSBitSet TempBitSet;
			TempBitSet.AllocateBits(SSALimit);
			this->InArgPointerCopies.push_back(TempBitSet);
		}
		for (size_t BlockIndex = 0; BlockIndex < this->RPOBlocks.size(); ++BlockIndex) {
			SMPBasicBlock *CurrBlock = this->RPOBlocks[BlockIndex];
			CurrBlock->InitPointerCopies(this->MaxLocalSSANum);
		}
		this->TraceInArgPointers();
	}
	return;
} // end of SMPFunction::TraceIncomingArgs()

// helper for TraceIncomingArgs(), focused on CODEPTR and POINTER types
//  Seeds the InArg pointer info from the SSA marker inst, then makes one
//  reverse-post-order pass over the blocks propagating it through simple copies.
void SMPFunction::TraceInArgPointers(void) {
	// Mark the InArgs that are code or data pointers.
	vector<SMPInstr *>::iterator FirstInstIter = this->RPOBlocks[0]->GetFirstInst();
	SMPInstr *FirstInst = (*FirstInstIter);
	assert(NULL != FirstInst);
	assert(FirstInst->IsMarkerInst());
	// Allocate one (initially empty) address set per global name, parallel to InArgPointerCopies.
	for (size_t index = 0; index < this->NumGlobalNames(); ++index) {
		set<STARS_ea_t> EmptySet;
		this->InArgPointerCopyAddrs.push_back(EmptySet);
	}
	// Seed: every pointer-typed DEF in the SSA marker inst is an original InArg pointer.
	for (set<DefOrUse, LessDefUse>::iterator DefIter = FirstInst->GetFirstDef(); DefIter != FirstInst->GetLastDef(); ++DefIter) {
		SMPOperandType DefType = DefIter->GetType();
		if ((CODEPTR == DefIter->GetType()) || IsDataPtr(DefType)) {
			int DefSSANum = DefIter->GetSSANum();
			assert(0 == DefSSANum); // all DEFs in marker inst are SSA # zero
			STARSOpndTypePtr DefOp = DefIter->GetOp();
			STARSOpndSetIter GlobIter = this->GlobalNames.find(DefOp);
			unsigned int NameIndex;
			if (GlobIter != this->GlobalNames.end()) {
				NameIndex = ExtractGlobalIndex(*GlobIter);
				assert(NameIndex < this->InArgPointerCopies.size());
				this->InArgPointerCopies[NameIndex].SetBit(0); // set SSANum 0 bit
				// Record the marker pseudo-address as the "copy site" for the original InArg.
				pair<set<STARS_ea_t>::iterator, bool> InsertResult = this->InArgPointerCopyAddrs[NameIndex].insert(STARS_SSA_MARKER_PSEUDO_ID);
				assert(InsertResult.second);
			}
			else { // block-local SSA name; rare for marker inst
				this->RPOBlocks[0]->SetInArgPointerCopy(DefOp, DefSSANum);
			}
		}
	}

	// Iterate through blocks in reverse post-order to mark copies.
	//  NOTE: Extend to more complicated cases with phi functions, etc., iterating until no more changes in the future. !!!!****!!!!
	for (size_t BlockIndex = 0; BlockIndex < this->RPOBlocks.size(); ++BlockIndex) {
		SMPBasicBlock *CurrBlock = this->RPOBlocks[BlockIndex];
		assert(NULL != CurrBlock);
		// Iterate through instructions in the current block and find the simple copy RTLs.
		for (vector<SMPInstr *>::iterator InstIter = CurrBlock->GetFirstInst(); InstIter != CurrBlock->GetLastInst(); ++InstIter) {
			SMPInstr *CurrInst = (*InstIter);
			STARSOpndTypePtr SourceOp;
			if (CurrInst->IsSimpleCopy(SourceOp)) {
				if ((nullptr != SourceOp) && (!SourceOp->IsVoidOp())) {
					// Determine whether the copy source is already a known InArg pointer copy.
					bool SourceOpInArgCopy = false;
					STARSDefUseIter UseIter = CurrInst->FindUse(SourceOp);
					assert(UseIter != CurrInst->GetLastUse());
					int UseSSANum = UseIter->GetSSANum();

					STARSOpndSetIter GlobIter = this->GlobalNames.find(SourceOp);
					if (GlobIter != this->GlobalNames.end()) {
						// Global name: consult the function-level bitset for this SSA number.
						unsigned int SourceNameIndex = ExtractGlobalIndex(*GlobIter);
						assert(SourceNameIndex < this->InArgPointerCopies.size());
						SourceOpInArgCopy = this->InArgPointerCopies[SourceNameIndex].GetBit(UseSSANum);
					}
					else { // local SSA name
						SourceOpInArgCopy = CurrBlock->IsInArgPointerCopy(SourceOp, UseSSANum);
					}

					if (SourceOpInArgCopy) {
						// We are copying an InArgPointerCopy to a DEF.
						STARSDefUseIter DefIter = CurrInst->GetFirstDef();
						assert(DefIter != CurrInst->GetLastDef());
						int DefSSANum = DefIter->GetSSANum();
						STARSOpndTypePtr DestOp = DefIter->GetOp();
						STARS_ea_t InstAddr = CurrInst->GetAddr();

						// Mark the DEF as a copy: global names go in the function-level
						//  bitset and address set; local names in the block-level records.
						//  NOTE: this GlobIter intentionally shadows the outer one.
						STARSOpndSetIter GlobIter = this->GlobalNames.find(DestOp);
						if (GlobIter != this->GlobalNames.end()) {
							unsigned int DestNameIndex = ExtractGlobalIndex(*GlobIter);
							assert(DestNameIndex < this->InArgPointerCopies.size());
							this->InArgPointerCopies[DestNameIndex].SetBit(DefSSANum);
							pair<set<STARS_ea_t>::iterator, bool> InsertResult = this->InArgPointerCopyAddrs[DestNameIndex].insert(InstAddr);
							assert(InsertResult.second);
						}
						else { // DestOp is local SSA name
							CurrBlock->SetInArgPointerCopy(DestOp, DefSSANum);
						}
					}
				}
			} // end if IsSimpleCopy()
		} // end for all insts in block
	} // end for all blocks in RPOBlocks
	return;
} // end of SMPFunction::TraceInArgPointers()

// Fetch SCCP constant iter into block- or func-level constants. True only if const value.
bool SMPFunction::FindSCCPConstIter(SMPBasicBlock *CurrBlock, int RegHashValue, bool LocalName, STARSSCCPMapIter &ConstIter) {
	// Look up the hashed name in the block-local constant map for local SSA names,
	//  otherwise in the function-level constant map.
	bool EntryFound;
	if (LocalName) {
		ConstIter = CurrBlock->FindLocalConstValue(RegHashValue);
		EntryFound = (ConstIter != CurrBlock->GetLastLocalConstValueIter());
	}
	else {
		ConstIter = this->FindConstValue(RegHashValue);
		EntryFound = (ConstIter != this->GetLastConstValueIter());
	}
	// Only report success when the SCCP lattice entry holds an actual constant value.
	return (EntryFound && (STARS_CONST_HAS_VALUE == ConstIter->second.ConstType));
} // end of SMPFunction::FindSCCPConstIter()

// Analyze buffer sizes and the use of buffers by vulnerable library funcs.
//  Correlates malloc() calls (MallocCallInArgsMap) with pointer args passed to
//  vulnerable library calls (BufferCallInArgsMap); for mallocs whose size arg is
//  an SCCP constant, records (arg position, size) per lib call site in
//  BufferCallConstBufferArgsMap for later annotation emission.
void SMPFunction::AnalyzeBufferUses(void) {
	if (this->HasCallToMalloc() && (!this->MallocCallInArgsMap.empty()) && (!this->BufferCallInArgsMap.empty()))
	{
		// We have both mallocs and calls to vulnerable library funcs that use buffers.
		//  It is possible that there are mallocs of constant size buffers, and one or more
		//  constant size buffers could be used by a vulnerable library function like strcpy().
		//  As a preliminary step, trace back from each buffer pointer arg used by vulnerable
		//  library funcs to the ultimate DefAddr. This tracing goes through moves and would stop
		//  at a return value from a call, e.g. the return value from malloc().

		// Map from (ArgOp, LibCallAddr) to UltimateDefAddr.
		//  Hopefully, some of the UltimateDefAddr values will be addresses of malloc()
		//  calls and we can link them to the MallocCallInArgsMap we already built.
		//  We can pre-screen and keep only the UltimateDefAddr values that are malloc() calls
		//  and determine later whether those malloc(size) calls had a constant size.
		STARSDefinitionLinkSet PointerArgCallDefToUltimateDefLinkSet;
		map<STARS_ea_t, STARSDefinitionSet >::const_iterator BufferCallArgsIter;
		for (BufferCallArgsIter = this->BufferCallInArgsMap.cbegin(); BufferCallArgsIter != this->BufferCallInArgsMap.cend(); ++BufferCallArgsIter) {
			STARS_ea_t LibCallAddr = BufferCallArgsIter->first;
			// Iterate through each item in the STARSDefinitionSet for the LibCallAddr.
			const STARSDefinitionSet CurrDefnSet = BufferCallArgsIter->second;
			for (STARSDefnSetIter DefnIter = CurrDefnSet.cbegin(); DefnIter != CurrDefnSet.cend(); ++DefnIter) {
				// Trace this pointer arg back through copies to its ultimate defining address.
				STARS_ea_t ArgDefAddr = DefnIter->second;
				SMPInstr *ArgDefInst = this->GetInstFromAddr(ArgDefAddr);
				int ArgSSANum = ArgDefInst->GetFirstNonFlagsDef()->GetSSANum();
				STARSOpndTypePtr ArgOp = DefnIter->first;
				STARSOpndTypePtr DefMoveOp;
				bool LocalName = ArgDefInst->GetBlock()->IsLocalName(ArgOp);
				STARS_ea_t UltimateArgAddr = ArgDefInst->GetBlock()->GetUltimateDefAddr(ArgOp, ArgDefAddr, ArgSSANum, LocalName, true, true, DefMoveOp);
				// Is UltimateArgAddr a call malloc() inst?
				if (this->MallocCallInArgsMap.find(UltimateArgAddr) != this->MallocCallInArgsMap.cend()) {
					// Success. Record for later analysis of constant size arg to malloc(size).
					// Make a pseudo-definition of the ArgOp and the LibCallAddr. We won't
					//  care about the ArgDefAddr when emitting annotations.
					STARSDefinition PseudoArgDef(ArgOp, LibCallAddr);
					pair<STARSDefinition, STARS_ea_t> LinkValue(PseudoArgDef, UltimateArgAddr);
					pair<STARSDefinitionLinkSet::iterator, bool> InsertResult = PointerArgCallDefToUltimateDefLinkSet.insert(LinkValue);
					if (!InsertResult.second) {
						SMP_msg("SERIOUS WARNING: AnalyzeBufferUses DefnLink conflict at %llx\n", (uint64_t) UltimateArgAddr);
					}
				}
			}
		}

		// for each entry in MallocCallInArgsMap, see if the InArg to malloc
		//  (i.e. the size argument) is a constant detected in the SCCP analysis.
		for (map<STARS_ea_t, STARS_ea_t>::const_iterator MallocIter = this->MallocCallInArgsMap.cbegin();
			MallocIter != this->MallocCallInArgsMap.cend();
			++MallocIter) {
			// MallocIter maps: malloc call addr (first) -> size-arg-passing inst addr (second).
			STARS_ea_t SizeArgAddr = MallocIter->second;
			SMPInstr *SizeArgPassInst = this->GetInstFromAddr(SizeArgAddr);
			assert(nullptr != SizeArgPassInst);
			STARSDefUseIter SizeArgIter = SizeArgPassInst->GetFirstNonFlagsDef();
			STARSOpndTypePtr SizeArgOp = SizeArgIter->GetOp();
			if (SizeArgOp->IsRegOp()) { // should always be the case for x86-64
				// Canonicalize to the full-width register before hashing for the SCCP lookup.
				STARSOpndTypePtr SearchOp = CloneIfSubwordReg(SizeArgOp);
				CanonicalizeOpnd(SearchOp);
				bool LocalName = SizeArgPassInst->GetBlock()->IsLocalName(SearchOp);
				int SizeArgHashValue = HashGlobalNameAndSSA(SearchOp, SizeArgIter->GetSSANum());
				STARSSCCPMapIter SizeConstIter;
				STARS_uval_t SizeValue;
				bool ConstSizeFound = this->FindSCCPConstIter(SizeArgPassInst->GetBlock(), 
					SizeArgHashValue, LocalName, SizeConstIter);
				if (ConstSizeFound) { // malloc() of constant size
					SizeValue = SizeConstIter->second.ConstValue;
					// See if any vulnerable lib func call uses the buffer allocated
					//  by this malloc().
					// We will do a linear search of the PointerArgCallDefToUltimateDefLinkSet, which
					//  should be small, as it only has links between vulnerable library call pointer
					//  args and malloc() return values.
					STARSDefinitionLinkSet::const_iterator LinkIter = PointerArgCallDefToUltimateDefLinkSet.cbegin();
					STARS_ea_t CurrMallocAddr = MallocIter->first;
					while (LinkIter != PointerArgCallDefToUltimateDefLinkSet.cend()) {						
						STARS_ea_t MallocCallAddr = LinkIter->second;
						if (CurrMallocAddr == MallocCallAddr) {
							// Matched. Keep track of info needed in annotations: the argument position,
							//  zero-based, and the constant buffer size, linked to the lib func call addr.
							STARS_ea_t LibFuncCallAddr = LinkIter->first.second;
							size_t ArgPos;
							STARS_regnum_t ArgRegNum = LinkIter->first.first->GetReg();
							STARSBufferCallArgMap::iterator BufferArgMapIter = this->BufferCallConstBufferArgsMap.find(LibFuncCallAddr);
							bool found = global_STARS_program->GetArgRegPosition(ArgRegNum, ArgPos);
							if (found) {
								pair<size_t, STARS_uval_t> InsertVal(ArgPos, SizeValue);
								if (BufferArgMapIter != this->BufferCallConstBufferArgsMap.end()) {
									// Already has entry with some other args. Insert into sorted list.
									//  The list is kept sorted by arg position, ascending.
									list<pair<size_t, STARS_uval_t> >::iterator ArgListIter = BufferArgMapIter->second.begin();
									bool inserted = false;
									while (ArgListIter != BufferArgMapIter->second.end()) {
										size_t CurrArgPos = (*ArgListIter).first;
										if (CurrArgPos > ArgPos) {
											(void) BufferArgMapIter->second.insert(ArgListIter, InsertVal);
											inserted = true;
											break;
										}
										else if (CurrArgPos == ArgPos) {
											// Duplicate search for some reason?
											inserted = true;
											break;
										}
										++ArgListIter;
									} // end of while (ArgListIter != BufferArgMapIter->second.end())
									if (!inserted) {
										// Needs to go at the end of the list.
										BufferArgMapIter->second.push_back(InsertVal);
									}
								}
								else {
									// Create new entry.
									list<pair<size_t, STARS_uval_t> > NewList;
									NewList.push_back(InsertVal);
									this->BufferCallConstBufferArgsMap[LibFuncCallAddr] = NewList;
								}
							}
							else {
								SMP_msg("ERROR: Argument position not found in AnalyzeBufferUses, ArgRegNum: %d\n", (int) ArgRegNum);
							}
						}
						++LinkIter;
					} // end of while (LinkIter != PointerArgCallDefToUltimateDefLinkSet.cend())
				}
			}
		}
	}
	return;
} // end of SMPFunction::AnalyzeBufferUses()

// Remove non-global SSA names from RefSet.
//  RefSet is modified in place; entries whose operand is not a function-wide
//  (global) SSA name are erased.
void SMPFunction::RemoveLocalRefs(STARSDefUseSet &RefSet) {
	STARSDefUseIter RefIter = RefSet.begin();
	while (RefIter != RefSet.end()) {
		STARSOpndTypePtr RefOp = RefIter->GetOp();
		if (!this->IsGlobalName(RefOp)) {
			// C++11 set::erase(iterator) returns the next valid iterator,
			//  so the manual save-next-then-erase idiom is unnecessary.
			RefIter = RefSet.erase(RefIter);
		}
		else {
			++RefIter;
		}
	}

	return;
} // end of SMPFunction::RemoveLocalRefs()

// Is CriticalOp already represented in AlreadyShadowed set?
bool SMPFunction::IsAlreadyShadowed(const ShadowPoint &CriticalOp) {
	bool found = false;

	for (ShadowSet::const_iterator AlreadyIter = this->AlreadyShadowed.cbegin(); AlreadyIter != this->AlreadyShadowed.cend(); ++AlreadyIter) {
		if (AlreadyIter->first == CriticalOp.first) {
			// Addresses match. Check SSA numbers in case we have a Phi DEF pseudo-address (multiple USE SSA #s).
			assert(AlreadyIter->second < this->TempShadowList.GetSize());
			assert(CriticalOp.second < this->TempShadowList.GetSize());
			if (this->TempShadowList.GetRefNum(AlreadyIter->second)->GetSSANum() == this->TempShadowList.GetRefNum(CriticalOp.second)->GetSSANum()) {
				// Addresses and SSA numbers match. Check operands pointed to.
				if (IsEqOp(this->TempShadowList.GetRefNum(AlreadyIter->second)->GetOp(),
					this->TempShadowList.GetRefNum(CriticalOp.second)->GetOp())) {
					found = true;
					break;
				}
			}
		}
	}
	return found;
} // end of SMPFunction::IsAlreadyShadowed()

// Trace CriticalOp via copies back to ShadowAddr, return false if no valid means of shadowing it.
//  Add new CriticalOp shadow-checking points to NewCriticalOps, which will be shadowed with separate indices.
//  If TracingMemWrite, terminate early when we can prove that CriticalOp was unsafe to use as address reg in memory write.
bool SMPFunction::FindShadowingPoint2(const ShadowPoint CriticalOp, const bool TracingMemWrite, ShadowSet &ShadowAddrSet, bool &MemUnsafe, ShadowSet &NewCriticalOps, bool &NonConstSourceFound, set<STARS_uval_t> &ConstValues) {
	bool UnsafeChain = false;
	STARS_ea_t ShadowCheckAddr = CriticalOp.first;
	STARSOpndTypePtr ShadowCheckUseOp = this->TempShadowList.GetRefNum(CriticalOp.second)->GetOp();

	bool DataFlowOpnd = MDIsDataFlowOpnd(ShadowCheckUseOp, this->UsesFramePointer());
	bool Duplicate = this->IsAlreadyShadowed(CriticalOp);
	if (TracingMemWrite && (!DataFlowOpnd))
		return false;
	else if ((!DataFlowOpnd) || Duplicate) {
		// end the recursion.
		bool ImmedOp = ShadowCheckUseOp->IsImmedOp();
		if (!Duplicate && (!ImmedOp))
			NonConstSourceFound = true;
		else if (ImmedOp) {
			// Save in the set of constant values we traced back to.
			pair<set<STARS_uval_t>::iterator, bool> InsertResult = ConstValues.insert(ShadowCheckUseOp->GetImmedValue());
			assert(InsertResult.first != ConstValues.end());
		}
		return true;  // if recursion has not started, ShadowAddrSet will be empty and no annotations will be emitted.
	}
	else {
		this->AlreadyShadowed.insert(CriticalOp);
	}

	bool ValidShadowing = true;
	bool ShadowPointFound = false;
	STARSOpndTypePtr UseOp = ShadowCheckUseOp;
	bool UseFP = this->UsesFramePointer();

	do {
		int CheckBlockNum;
		if (STARS_IsBlockNumPseudoID(ShadowCheckAddr)) {
			CheckBlockNum = STARS_GetBlockNumFromPseudoID(ShadowCheckAddr);
		}
		else {
			CheckBlockNum = this->GetBlockNumFromInstAddr(ShadowCheckAddr);
		}
		assert(0 <= CheckBlockNum);
		assert(CheckBlockNum < (decltype(CheckBlockNum))this->RPOBlocks.size());
		SMPBasicBlock *CurrBlock = this->RPOBlocks[(size_t) CheckBlockNum];

		bool LocalName = (!this->IsGlobalName(UseOp));
		STARS_ea_t CurrShadowAddr = CurrBlock->GetDefAddrFromUseAddr(UseOp, ShadowCheckAddr, this->TempShadowList.GetRefNum(CriticalOp.second)->GetSSANum(), LocalName);
		if (STARS_BADADDR == CurrShadowAddr) {
			ValidShadowing = false;
			break; // cannot follow chain any higher, but previous chain info is still valid.
		}
		if (STARS_IsBlockNumPseudoID(CurrShadowAddr)) { // must have a Phi DEF
			int ShadowBlockNum = STARS_GetBlockNumFromPseudoID(CurrShadowAddr);
			assert((0 <= ShadowBlockNum) && ((decltype(ShadowBlockNum))this->GetNumBlocks() > ShadowBlockNum));
			CurrBlock = this->RPOBlocks[(size_t) ShadowBlockNum];
			PhiSetIter PhiIter = CurrBlock->FindPhi(UseOp);
			assert(PhiIter != CurrBlock->GetLastPhi());
			// For each Phi USE, recurse.
			ShadowSet TempShadowAddrSet; // only union these into ShadowAddrSet if all Phi USE recursions succeed.

			for (size_t PhiIndex = 0; PhiIndex < PhiIter->GetPhiListSize(); ++PhiIndex) {
				int PhiUseSSANum = PhiIter->GetUseSSANum(PhiIndex);
				if (0 > PhiUseSSANum) {
					SMP_msg("ERROR: FindShadowingPoint2() cannot trace uninitialized Phi USE at CurrShadowAddr %p in func at %p\n",
						CurrShadowAddr, this->GetFirstFuncAddr());
					ValidShadowing = false;
					break;
				}
				this->TempShadowList.SetRef(UseOp->clone(), PhiIter->GetUseType(PhiIndex), PhiUseSSANum);
				ShadowPoint PhiUsePoint(CurrShadowAddr, this->TempShadowList.GetSize() - 1);
				ValidShadowing = this->FindShadowingPoint2(PhiUsePoint, TracingMemWrite, TempShadowAddrSet, MemUnsafe, NewCriticalOps, NonConstSourceFound, ConstValues);
				if (!ValidShadowing) {
					break;  // we must succeed on all Phi USEs
				}
			}
			if (ValidShadowing) { // All Phi USEs succeeded
				//  Remove current CriticalOp from ShadowAddrSet (if present); let the PhiUseOps logically take its place
				ShadowSet::iterator OldIter = ShadowAddrSet.find(CriticalOp);
				if (OldIter != ShadowAddrSet.end()) { // found it
					OldIter = ShadowAddrSet.erase(OldIter);
				}
				// Insert the shadow points for all the Phi USEs now that all Phi USE recursions succeeded.
#if 0
				ShadowAddrSet.insert(TempShadowAddrSet.begin(), TempShadowAddrSet.end());
#else
				for (ShadowSet::const_iterator TempIter = TempShadowAddrSet.cbegin(); TempIter != TempShadowAddrSet.cend(); ++TempIter) {
					pair<ShadowSet::iterator, bool> InsertResult = ShadowAddrSet.insert(*TempIter);
				}
#endif
			}
			break; // success if ValidShadowing is true, failure otherwise
		}
		else if (STARS_IsSSAMarkerPseudoID(CurrShadowAddr)) {
			NonConstSourceFound = true;
			if (MDIsStackAccessOpnd(UseOp, this->UsesFramePointer())) {
				// We need to guard against the corner case of a stack location that is
				//  apparently used before it is defined. We cannot emit FPTRSHADOW [RSP-128]
				//  at the top of the function, as that is a garbage value before the stack frame
				//  is allocated.
				int SignedOffset = (int) UseOp->GetAddr();
				if (SignedOffset < 0) {
					ValidShadowing = false;
					break;
				}
			}
			list<SMPInstr *>::iterator FirstInstIter = this->GetFirstInstIter();
			SMPInstr *CurrInst = (*FirstInstIter);
			STARSDefUseIter NewDefIter = CurrInst->FindDef(UseOp);
			assert(NewDefIter != CurrInst->GetLastDef());
			if (CurrInst->IsLastInBlock()) {
				// Bizarre corner case that might never happen; first block has only SSA marker. We cannot shadow here.
				ValidShadowing = false;
				break;
			}
			// We want to replace our current CriticalOp with the DEF at the addr of the inst after the SSA marker.
			//  Remove current CriticalOp from ShadowAddrSet (if present) and add new DEF in its place.
			ShadowSet::iterator OldIter = ShadowAddrSet.find(CriticalOp);
			if (OldIter != ShadowAddrSet.end()) { // found it
				OldIter = ShadowAddrSet.erase(OldIter);
			}
			++FirstInstIter; // move to first real inst
			CurrShadowAddr = (*FirstInstIter)->GetAddr();
			this->TempShadowList.SetRef(NewDefIter->GetOp(), NewDefIter->GetType(), NewDefIter->GetSSANum());
			ShadowPoint NextCriticalOp(CurrShadowAddr, this->TempShadowList.GetSize() - 1);

			// If we are tracing, see if chain is maybe-aliased.
			if (NewDefIter->HasIndirectWrite()) {
				MemUnsafe = true;
			}

			pair<ShadowSet::iterator, bool> InsertResult = ShadowAddrSet.insert(NextCriticalOp);
			if (InsertResult.second) { // not already shadowing that DEF
				ShadowPointFound = true;
			}
			// We are finished following the chain. Cannot go higher than the SSA Marker inst.
			break;
		}
		else { // must be an instruction address
			SMPInstr *CurrInst = this->GetInstFromAddr(CurrShadowAddr);
			assert(NULL != CurrInst);
			if (DEFAULT != CurrInst->GetDataFlowType()) {
				ValidShadowing = false;
				break; // cannot follow chain any higher, but previous chain info is still valid.
			}
			STARSOpndTypePtr NewUseOp = nullptr;
			// If we have a conditional move instruction, then there will be two USEs. One is the
			//  operand that will be copied to the DEF if the condition is true, and the other is
			//  the operand that represents the unchanged DEF if the copy does not happen, e.g.
			//  if (guard) RDX5 := RAX3;  will have USEs RAX3 and RDX4, where RDX4 is the value
			//  that becomes RDX5 when the guard is not true. We need to treat this instruction as
			//  a Phi DEF with two Phi USEs and trace both USEs.
			if (CurrInst->MDIsConditionalMoveInstr()) {
				// For each USE other than the flags register, recurse.
				ShadowSet TempShadowAddrSet; // only union these into ShadowAddrSet if all Phi USE recursions succeed.
				for (STARSDefUseIter UseIter = CurrInst->GetFirstUse(); UseIter != CurrInst->GetLastUse(); ++UseIter) {
					int UseSSANum = UseIter->GetSSANum();
					if (0 > UseSSANum) {
						SMP_msg("ERROR: FindShadowingPoint2() cannot trace uninitialized CondMove USE at CurrShadowAddr %p in func at %p\n",
							CurrShadowAddr, this->GetFirstFuncAddr());
						ValidShadowing = false;
						break;
					}
					STARSOpndTypePtr CondMoveUseOp = UseIter->GetOp()->clone();
					bool FlagsReg = (CondMoveUseOp->IsRegOp() && (CondMoveUseOp->MatchesReg(MD_FLAGS_REG)));
					if (!FlagsReg) {
						this->TempShadowList.SetRef(CondMoveUseOp, UseIter->GetType(), UseSSANum);
						ShadowPoint UsePoint(CurrShadowAddr, this->TempShadowList.GetSize() - 1);
						ValidShadowing = this->FindShadowingPoint2(UsePoint, TracingMemWrite, TempShadowAddrSet, MemUnsafe, NewCriticalOps, NonConstSourceFound, ConstValues);
						if (!ValidShadowing) {
							break;  // we must succeed on all conditional move USEs
						}
					}
				}
				if (ValidShadowing) { // All USEs succeeded
					//  Remove current CriticalOp from ShadowAddrSet (if present); let the UseOps logically take its place
					ShadowSet::iterator OldIter = ShadowAddrSet.find(CriticalOp);
					if (OldIter != ShadowAddrSet.end()) { // found it
						OldIter = ShadowAddrSet.erase(OldIter);
					}
					// Insert the shadow points for all the USEs now that all USE recursions succeeded.
#if 0
					ShadowAddrSet.insert(TempShadowAddrSet.begin(), TempShadowAddrSet.end());
#else
					for (ShadowSet::const_iterator TempIter = TempShadowAddrSet.cbegin(); TempIter != TempShadowAddrSet.cend(); ++TempIter) {
						pair<ShadowSet::iterator, bool> InsertResult = ShadowAddrSet.insert(*TempIter);
					}
#endif
				}
				break; // success if ValidShadowing is true, failure otherwise
			}
			else if (!CurrInst->IsSimpleCopy(NewUseOp)) {
				NonConstSourceFound = true;
				// We could have something like [rsp+32] := [rsp+32] + 8.
				//  The DEF is the DEF of our CriticalOp, and we cannot trace any farther for this shadowing index.
				//  But the USE of the same operand could be a vulnerable DEF-USE chain that needs its own
				//  shadowing index.
				// We need to shadow the DEF right AFTER this instruction and then add a new chain to NewCriticalOps
				//  starting with the USE.

				// First, if we are tracing memory writes for safety, the chain has become unanalyzeable for safety
				//  due to the non-copy operation, unless it is a stack pointer copy.
				if (TracingMemWrite) {
					if (CurrInst->MDIsStackPointerCopy(this->UsesFramePointer())) {
						// Stack pointer copy, but not a simple copy. Must be load-effective-address copy.
						STARSOpndTypePtr LeaMemOp = CurrInst->GetLeaMemUseOp();
						if ((nullptr != LeaMemOp) && (!LeaMemOp->IsVoidOp())) {
							ValidShadowing = MDIsDirectStackAccessOpnd(LeaMemOp, this->UsesFramePointer());
						}
						else {
							ValidShadowing = true;
						}
					}
					else {
						ValidShadowing = false;
					}
					break;
				}
				if (CurrInst->IsLastInBlock()) {
					// Cannot insert shadowing instrumentation before next inst because next inst is reachable by
					//  some path other than fall-through from the current inst. DEF cannot be shadowed.
					ValidShadowing = false;
					break; // exit; we will shadow, if possible, just not as far as this DEF.
				}
				CurrBlock = CurrInst->GetBlock();  // move from ShadowCheckAddr up to CurrShadowAddr
				vector<SMPInstr *>::iterator NextInstIter = CurrBlock->GetInstIterFromAddr(CurrShadowAddr);
				assert(NextInstIter != CurrBlock->GetLastInst());
				++NextInstIter;
				assert(NextInstIter != CurrBlock->GetLastInst());

				//  Remove current CriticalOp from ShadowAddrSet (if present) and add UseOp in its place.
				ShadowSet::iterator OldIter = ShadowAddrSet.find(CriticalOp);
				if (OldIter != ShadowAddrSet.end()) { // found it
					OldIter = ShadowAddrSet.erase(OldIter);
				}
				STARSDefUseIter ShadowDefIter = CurrInst->FindDef(UseOp);
				assert(ShadowDefIter != CurrInst->GetLastDef());
				STARS_ea_t NextInstAddr = (*NextInstIter)->GetAddr();
				this->TempShadowList.SetRef(ShadowDefIter->GetOp(), ShadowDefIter->GetType(), ShadowDefIter->GetSSANum());
				pair<ShadowSet::iterator, bool> InsertResult = ShadowAddrSet.insert(ShadowPoint(NextInstAddr, this->TempShadowList.GetSize() - 1));
				if (!InsertResult.second) { // already following that chain
					break;  // nothing new to trace
				}
				ShadowPointFound = true;
				// If we are tracing, see if chain is maybe-aliased.
				if (ShadowDefIter->HasIndirectWrite()) {
					MemUnsafe = true;
				}
				// If we are starting a NewCriticalOps chain, we need to start with a USE of the same SSA name as the DEF.
				STARSDefUseIter NewUseIter = CurrInst->FindUse(UseOp);
				if (NewUseIter != CurrInst->GetLastUse()) {
					// We have a starting point for a new shadowing operation.
					this->TempShadowList.SetRef(NewUseIter->GetOp(), NewUseIter->GetType(), NewUseIter->GetSSANum());
					ShadowPoint NewCritOp(CurrShadowAddr, this->TempShadowList.GetSize() - 1);
					if (!this->IsAlreadyShadowed(NewCritOp)) {
						pair<ShadowSet::iterator, bool> InsertResult = NewCriticalOps.insert(NewCritOp);
						// We don't care whether that was a new insertion or not.
					}
				}
				break; // done with current chain
			}
			else { // simple copy instruction
				assert(nullptr != NewUseOp);
				if (NewUseOp->IsRegOp()) {
					CanonicalizeOpnd(NewUseOp);
				}
				STARSDefUseIter DefIter = CurrInst->GetFirstDef();
				assert(DefIter != CurrInst->GetLastDef());

				// In the mem-write tracing case, if we copied from a memory op that is not just a direct single-addr-reg or stack
				//  access, then we cannot analyze the chain any further. If we copied from a loop-variant value,
				//  then the memory write is unsafe.
				if (TracingMemWrite) {
					if (this->IsBlockInAnyLoop(CurrInst->GetBlock()->GetNumber()) && (!DefIter->IsInvariantForAllLoops())) {
						ValidShadowing = false;
						break;
					}
					else if (NewUseOp->IsMemOp()) {
						if (!MDIsDirectStackAccessOpnd(NewUseOp, this->UsesFramePointer())) {
							// The TracingMemWrite case differs from the shadowing case here.
							//  The shadowing case would try (and fail) to trace [eax] while the
							//  TracingMemWrite case is concerned with tracing eax, not [eax]. If eax is a 
							//  loop-invariant value and satisfies the other criteria for memory
							//  write safety, then it should point to the return address only if
							//  it is a copy of an unsafe pointer that was stored to memory. That
							//  other unsafe pointer will be detected elsewhere in the function, making
							//  the whole function unsafe, which is all that matters in the TracingMemWrite
							//  case. So, static mem ops and phrase ops with no offset and no unsafety
							//  in their address regs should be traced through their address regs.
#define STARS_AGGRESSIVE_MEM_TRACING 1
#if STARS_AGGRESSIVE_MEM_TRACING
							if (NewUseOp->IsStaticMemOp()) {
								NonConstSourceFound = true;
								ValidShadowing = true;
								break;
							}
							else if (NewUseOp->IsMemNoDisplacementOp()) {
								int BaseReg, IndexReg;
								uint16_t ScaleFactor;
								STARS_ea_t DummyOffset = 0;
								NewUseOp->MDExtractAddressFields(BaseReg, IndexReg, ScaleFactor, DummyOffset);
								assert(0 == DummyOffset);
								bool SingleAddressReg = ((IndexReg == STARS_x86_R_none) != (BaseReg == STARS_x86_R_none)); // logical XOR
								if (SingleAddressReg && (0 == ScaleFactor)) {
									STARS_regnum_t AddressReg;
									if (IndexReg == STARS_x86_R_none) {
										AddressReg = MDCanonicalizeSubReg((STARS_regnum_t) BaseReg);
									}
									else {
										AddressReg = MDCanonicalizeSubReg((STARS_regnum_t) IndexReg);
									}
									NewUseOp = CurrInst->MakeRegOpnd(AddressReg); // fall through to code to analyze NewUseOp
								}
								else {
									ValidShadowing = false;
									break;
								}
							}
#else
							ValidShadowing = false;
							break;
#endif
						}
					}
				}

				STARSDefUseIter NewUseIter = CurrInst->FindUse(NewUseOp);
				assert(NewUseIter != CurrInst->GetLastUse());
				// We want to replace our current CriticalOp with the NewUseOp at this addr.
				//  Remove current CriticalOp from ShadowAddrSet (if present) and add NewUseOp in its place.
				ShadowSet::iterator OldIter = ShadowAddrSet.find(CriticalOp);
				if (OldIter != ShadowAddrSet.end()) { // found it
					OldIter = ShadowAddrSet.erase(OldIter);
				}
				this->TempShadowList.SetRef(NewUseIter->GetOp(), NewUseIter->GetType(), NewUseIter->GetSSANum());
				ShadowPoint NextCriticalOp(CurrShadowAddr, this->TempShadowList.GetSize() - 1);
				pair<ShadowSet::iterator, bool> InsertResult = ShadowAddrSet.insert(NextCriticalOp);
				if (!InsertResult.second) { // already following that chain
					break;  // nothing new to trace
				}
				ShadowPointFound = true;
				// If we are tracing, see if chain is maybe-aliased.
				if (DefIter->HasIndirectWrite()) {
					MemUnsafe = true;
				}
				if (!MDIsDataFlowOpnd(NewUseOp, this->UsesFramePointer())) {
					// We are finished following the chain.
					if (!(NewUseOp->IsImmedOp())) {
						NonConstSourceFound = true;
					}
					else {
						// Save in the set of constant values we traced back to.
						pair<set<STARS_uval_t>::iterator, bool> InsertResult = ConstValues.insert(NewUseOp->GetImmedValue());
						assert(InsertResult.first != ConstValues.end());
					}
					break;
				}
				else { // recurse on NextCriticalOp
					ValidShadowing = this->FindShadowingPoint2(NextCriticalOp, TracingMemWrite, ShadowAddrSet, MemUnsafe, NewCriticalOps, NonConstSourceFound, ConstValues);
					break; // recursion handled rest of the chain
				}
			} // end if not SimpleCopy else ...
		} // end if blocknum else if SSA marker inst else [regular inst addr]
	} while (ValidShadowing);

	// Success if we found any shadow points before chain had to terminate in non-mem-write case.
	//  In mem-write case, must be ValidShadowing.
	bool success = TracingMemWrite ? ValidShadowing : (ValidShadowing || ShadowPointFound);
	if (!success) {
		NonConstSourceFound = true;
	}
	return success;
} // end of SMPFunction::FindShadowingPoint2()

// Try to find safe indirect memory writes
// Try to find safe indirect memory writes.
//  For each non-looping instruction with an indirect memory write, extract the
//  address expression. If the address uses a single, unscaled register, trace that
//  register backwards via FindShadowingPoint2() to decide whether the write can
//  ever clobber the return address on the stack. Each proven-safe memory DEF is
//  marked via SetDefSafeMemWrite().
// Returns true if at least one indirect memory write was proven safe.
bool SMPFunction::AnalyzeMemWriteSafety(void) {
	bool FoundSafeMemWrites = false;
	for (size_t BlockIndex = 0; BlockIndex < this->RPOBlocks.size(); ++BlockIndex) {
		SMPBasicBlock *CurrBlock = this->RPOBlocks[BlockIndex];
		if (CurrBlock->HasIndirectMemWrite()) {
			// Find all instructions with indirect mem writes.
			for (vector<SMPInstr *>::iterator InstIter = CurrBlock->GetFirstInst(); InstIter != CurrBlock->GetLastInst(); ++InstIter) {
				SMPInstr *CurrInst = (*InstIter);
				// A repeat-prefixed (string) instruction writes a range of addresses; too hard to prove safe.
				bool LoopingWrite = CurrInst->MDHasAnyRepeatPrefix();
				if (CurrInst->HasIndirectMemoryWrite() && (!LoopingWrite)) {
					STARS_ea_t InstAddr = CurrInst->GetAddr();
					STARSOpndTypePtr DefOp = CurrInst->GetMemDef();
					int BaseReg, IndexReg;
					uint16_t ScaleFactor;
					STARS_ea_t offset;
					MDExtractAddressFields(DefOp, BaseReg, IndexReg, ScaleFactor, offset);
					// In order to be safe, we need to start conservatively with a single address reg
					//  that is not scaled and has no offset other than a stack offset.
					//  We want to be less conservative for StaticMemOps, e.g. 0x8049170[ebx]; if we can
					//  prove that [ebx] would be safe, consider the whole operand safe.
					bool SingleAddressReg = ((IndexReg == STARS_x86_R_none) != (BaseReg == STARS_x86_R_none)); // logical XOR
					if (SingleAddressReg && (0 == ScaleFactor)) {
						bool DirectStackOperand = MDIsDirectStackAccessOpnd(DefOp, this->UsesFramePointer());
						if (!DirectStackOperand) {
							// It is possible that the memory write is safe. We must trace its address reg back
							//  to simple copies of loop-invariant DEFs or to an incoming arg to declare it safe
							//  from overwriting our return address on the stack.
							STARS_regnum_t AddressReg;
							if (IndexReg == STARS_x86_R_none) {
								AddressReg = MDCanonicalizeSubReg((STARS_regnum_t) BaseReg);
							}
							else {
								AddressReg = MDCanonicalizeSubReg((STARS_regnum_t) IndexReg);
							}
							STARSOpndTypePtr UseOp = CurrInst->MakeRegOpnd(AddressReg);
							STARSDefUseIter UseIter = CurrInst->FindUse(UseOp);
							assert(UseIter != CurrInst->GetLastUse());
							// Seed the shadow list with the address register USE at the write site.
							this->TempShadowList.clear();
							this->TempShadowList.SetRef(UseOp, UseIter->GetType(), UseIter->GetSSANum());
							STARS_ea_t ShadowCheckAddr = CurrInst->GetAddr();
							ShadowPoint CriticalOp(ShadowCheckAddr, 0); // Checking point at indirect write
							list<ShadowPoint> WorkList;
							WorkList.push_back(CriticalOp);
							this->AlreadyShadowed.clear();
							ShadowSet ShadowUses;      // dummy set of addr+USE pairs to shadow values that will be checked at ShadowCheckAddr
							ShadowSet NewCriticalOps;  // dummy worklist of new shadow USEs and their addresses
							bool MemUnsafe = false;
							bool NonConstSourceFound = false;
							set<STARS_uval_t> ConstValues;
							// TracingMemWrite == true: trace the address reg, not a shadowed func ptr.
							bool SafeWrite = this->FindShadowingPoint2(CriticalOp, true, ShadowUses, MemUnsafe, NewCriticalOps, NonConstSourceFound, ConstValues);
							if (SafeWrite) {
								assert(NewCriticalOps.empty());  // FindShadowingPoint2() should stop before adding to this set
								STARSDefUseIter DefIter = CurrInst->FindDef(DefOp);
								assert(DefIter != CurrInst->GetLastDef());
								DefIter = CurrInst->SetDefSafeMemWrite(DefOp);
								assert(DefIter != CurrInst->GetLastDef());
								FoundSafeMemWrites = true;
								// NOTE: Fixed format specifier; InstAddr is an integer address, not a pointer,
								//  so %p was undefined behavior. Print as hex per file convention.
								SMP_msg("INFO: Found safe indirect write in AnalyzeMemWriteSafety at %llx\n",
									(unsigned long long) InstAddr);
							}
						}
					}
				} // end if inst has indirect mem write
			} // end for all instructions in block
		} // end if block has indirect mem write
	} // end for all blocks in function

	return FoundSafeMemWrites;
} // end of SMPFunction::AnalyzeMemWriteSafety()

// Determine inst ID set that func could return to, including tail call issues; return true if set changes
// Determine inst ID set that func could return to, including tail call issues; return true if set changes.
//  On the first iteration we examine every recorded call site of this function; on
//  later iterations we only revisit tail-calling functions whose own return-target
//  sets were previously incomplete (tracked in UnresolvedCallers).
bool SMPFunction::ComputeReturnTargets(bool FirstIteration) {
	bool changed = false;
	if ((!(this->FuncReturnsToCaller())) || this->IsLinkerStub()) {
		// Function never returns to its caller (or is a stub); nothing to compute.
		this->ReturnTargetsComputed = true;
	}
	else if (!this->ReturnTargetsComputed) {
		bool IncompleteCaller = false;
		if (FirstIteration) {
			for (set<STARS_ea_t>::const_iterator CallSitesIter = this->AllCallSites.cbegin(); CallSitesIter != this->AllCallSites.cend(); ++CallSitesIter) {
				// Get the fall-through from the calling inst
				STARS_ea_t CallAddr = (*CallSitesIter);
				// Weed out recursive calls that are jumps or branches.
				int CallBlockNum = this->GetBlockNumFromInstAddr(CallAddr);
				if (0 <= CallBlockNum) {
					// CallAddr is in the current function.
					SMPInstr *CallInst = this->RPOBlocks[CallBlockNum]->FindInstr(CallAddr);
					assert(NULL != CallInst);
					if (CallInst->IsJumpOrBranchInstr()) {
						continue; // internal jump or branch recursion; no return points
					}
				}
				// Get Func info of the caller.
				STARS_Function_t *FuncInfo = SMP_get_func(CallAddr);
				if (NULL == FuncInfo) {
					SMP_msg("SERIOUS WARNING: ComputeReturnTargets: Call location %llx not in a function.\n",
						(unsigned long long) CallAddr);
					continue;
				}
				// Get first addr in func and map to SMPFunction *.
				STARS_ea_t FirstAddrOfCaller = FuncInfo->get_startEA();
				SMPFunction *CallingFunc = this->GetProg()->FindFunction(FirstAddrOfCaller);
				assert(nullptr != CallingFunc);
				SMPInstr *CallingInst = CallingFunc->GetInstFromAddr(CallAddr);
				// Tail calls from foo() to bar() lead to all return targets of foo() being
				//  added to bar()'s return targets. 
				bool TailCallFlag = CallingInst->IsTailCall();
				if (TailCallFlag) {
					// Get return targets of tail caller, as we return to them.
					if (CallingFunc->AreReturnTargetsComputed()) {
						const set<STARS_ea_t> CallerReturnTargets = CallingFunc->GetReturnTargets();
						set<STARS_ea_t>::const_iterator TargIter = CallerReturnTargets.cbegin();
						while (TargIter != CallerReturnTargets.cend()) {
							STARS_ea_t CallerTargetAddr = (*TargIter);
							pair<set<STARS_ea_t>::iterator, bool> InsertResult = this->ReturnTargets.insert(CallerTargetAddr);
							changed |= InsertResult.second;
							// BUG FIX: Record the SAME address as a tail-call return target.
							//  The old code incremented TargIter before this second insertion,
							//  which tagged the wrong address and dereferenced cend() on the
							//  final iteration (undefined behavior).
							InsertResult = this->TailReturnTargets.insert(CallerTargetAddr);
							++TargIter;
						}
					}
					else {
						// Caller's targets not ready yet; revisit on a later iteration.
						IncompleteCaller = true;
						UnresolvedCallers.insert(CallingFunc);
					}
					if (CallingFunc->IsPossibleIndirectCallTarget()) {
						IncompleteCaller = true;
						UnresolvedCallers.insert(CallingFunc);
					}
					if (CallingFunc->IsCalledFromOrphanedCode() || CallingFunc->IsTailCallChainFromOrphanedCode()) {
						this->SetIsTailCallChainFromOrphanedCode();
					}
				}
				else {
					STARS_ea_t FallThroughAddr = CallingInst->GetFallThroughAddr();
					bool BadFallThrough = ((STARS_BADADDR == FallThroughAddr) || (STARS_LIVEIN_PSEUDO_ID == FallThroughAddr));
					if (BadFallThrough) { // error case; could not find fall-through
						this->ReturnTargetsComputed = true;
						UnresolvedCallers.clear();
						return false;
					}
					else { // Got good FallThroughAddr
						pair<set<STARS_ea_t>::iterator, bool> InsertResult = this->ReturnTargets.insert(FallThroughAddr);
						changed |= InsertResult.second;
					}
				}
			}
		}
		else { // not FirstIteration; just look at unresolved tail call sites
			set<SMPFunction *>::iterator TailCallIter = UnresolvedCallers.begin();
			while (TailCallIter != UnresolvedCallers.end()) {
				SMPFunction *CallingFunc = (*TailCallIter);
				// Get return targets of tail caller, as we return to them.
				if (CallingFunc->AreReturnTargetsComputed()) {
					const set<STARS_ea_t> CallerReturnTargets = CallingFunc->GetReturnTargets();
					set<STARS_ea_t>::const_iterator TargIter = CallerReturnTargets.cbegin();
					while (TargIter != CallerReturnTargets.cend()) {
						pair<set<STARS_ea_t>::iterator, bool> InsertResult = this->ReturnTargets.insert(*TargIter);
						++TargIter;
						changed |= InsertResult.second;
					}
					// erase() returns the iterator after the erased element; no increment needed.
					TailCallIter = UnresolvedCallers.erase(TailCallIter);
				}
				else {
					IncompleteCaller = true;
					++TailCallIter;
				}
				// NOTE(review): an indirect-call-target caller is re-inserted even after its
				//  targets were resolved, keeping this function incomplete; presumably intentional,
				//  since new callers of CallingFunc could still be discovered.
				if (CallingFunc->IsPossibleIndirectCallTarget()) {
					IncompleteCaller = true;
					UnresolvedCallers.insert(CallingFunc);
				}
				if (CallingFunc->IsCalledFromOrphanedCode() || CallingFunc->IsTailCallChainFromOrphanedCode()) {
					this->SetIsTailCallChainFromOrphanedCode();
				}
			}
		}
		this->ReturnTargetsComputed = (!IncompleteCaller);
	}
	else {
		// Targets already computed; drop any stale unresolved-caller bookkeeping.
		UnresolvedCallers.clear();
	}

	return changed;
} // SMPFunction::ComputeReturnTargets()

// Emit all annotations for the function, including all per-instruction
//  annotations.
// AnnotFile receives the main (mmStrata) annotations; InfoAnnotFile receives
//  informational annotations for other downstream tools.
// Fixed printf format specifiers: this->Size, ArgLimit + 1, and ArgIndex are
//  size_t values (printed with %zu elsewhere in this file) but were passed for
//  %u conversions, which is undefined behavior on LP64 platforms.
void SMPFunction::EmitAnnotations(FILE *AnnotFile, FILE *InfoAnnotFile) {
	// Emit annotation for the function as a whole.
	list<SMPBasicBlock *>::iterator BlockIter;
	SMPBasicBlock *CurrBlock;
	// Analysis problems limit soundness of later annotations; reported via FUNC PROBLEM below.
	bool FuncHasProblems = ((!this->AnalyzedSP) || (!this->HasGoodRTLs()) || (this->HasUnresolvedIndirectCalls())
		|| (this->HasUnresolvedIndirectJumps()) || (this->HasSharedChunks()));

	if (this->StaticFunc) {
		SMP_fprintf(AnnotFile,	"%18llx %6zu FUNC LOCAL  %s ", (unsigned long long) this->GetStartAddr(),
			this->Size, this->GetFuncName());
	}
	else {
		SMP_fprintf(AnnotFile,	"%18llx %6zu FUNC GLOBAL %s ", (unsigned long long) this->GetStartAddr(),
			this->Size, this->GetFuncName());
	}
	// Return address safety status for the function.
	switch (this->GetReturnAddressStatus())
	{
		case FUNC_UNKNOWN:
		{
			SMP_fprintf(AnnotFile, "FUNC_UNKNOWN ");
			break;
		}
		case FUNC_SAFE:
		{
			SMP_fprintf(AnnotFile, "FUNC_SAFE ");
			break;
		}
		case FUNC_UNSAFE:
		{
			SMP_fprintf(AnnotFile, "FUNC_UNSAFE ");
			break;
		}
		default:
			assert(0);	
	}
	if (this->UseFP) {
		SMP_fprintf(AnnotFile, "USEFP ");
	}
	else {
		SMP_fprintf(AnnotFile, "NOFP ");
	}
	if (this->FuncInfo->HasReturnPoints() /*does_return()*/) {
		SMP_fprintf(AnnotFile, "RET ");
	}
	else {
		SMP_fprintf(AnnotFile, "NORET ");
	}

	if (this->IsLeaf())
		SMP_fprintf(AnnotFile, "FUNC_LEAF ");
	// Store the first return instruction's address
	// NOTE: This was useless info and we just print an address to keep the line format the same for easy parsing.
	SMP_fprintf(AnnotFile,"%18llx ", (unsigned long long) (this->FuncInfo->get_startEA() + this->FuncInfo->GetFuncSize() - 1));

	if (this->IsLibFunc())
		SMP_fprintf(AnnotFile, "LIBRARY ");
	SMP_fprintf(AnnotFile, "\n");

	// Emit annotations about how to restore register values
	SMP_fprintf(AnnotFile, "%18llx %6d FUNC FRAMERESTORE ", (unsigned long long) this->GetStartAddr(), 0);
	for (int i = STARS_x86_R_ax; i <= global_STARS_program->GetSTARS_MD_LAST_SAVED_REG_NUM(); i++) {
		SMP_fprintf(AnnotFile, "%d %d %d ", i, this->SavedRegLoc[i], this->ReturnRegTypes[i]);
	}
	SMP_fprintf(AnnotFile, "ZZ\n");

	// Print type left in the return register.
	if (MD_RETURN_VALUE_REG != STARS_x86_R_none) {
		// FIXED: %6zu (was %6u) -- this->Size is size_t.
		SMP_fprintf(InfoAnnotFile, "%18llx %6zu FUNC RETURNTYPE ", (unsigned long long) this->GetStartAddr(), this->Size);
		SMP_fprintf(InfoAnnotFile, "%s %d\n", MDGetRegNumName(MD_RETURN_VALUE_REG, global_STARS_program->GetSTARS_ISA_Bytewidth()), this->ReturnRegTypes[MD_RETURN_VALUE_REG]);
	}
	// Print types of incoming arguments, if any.
	if (this->GetIncomingArgCount() > 0) {
		std::size_t ArgIndex;
		std::size_t ArgLimit = this->MaxInArgIndex;
		assert((this->MaxInArgIndex + 1) >= this->InArgCount);
		// Clamp to the table size so we never index InArgTypes out of bounds.
		if (ArgLimit >= STARS_MAX_ARGS_PER_FUNC) {
			ArgLimit = STARS_MAX_ARGS_PER_FUNC - 1;
		}
		// FIXED: %6zu / %4zu (were %6u / %4u) -- size_t arguments.
		SMP_fprintf(InfoAnnotFile, "%18llx %6zu FUNC INARGS %4zu  ", (unsigned long long) this->GetStartAddr(), this->Size,
			ArgLimit + 1);
		for (ArgIndex = 0; ArgIndex <= ArgLimit; ++ArgIndex) {
			// FIXED: ARG%zu (was ARG%u) -- ArgIndex is std::size_t.
			SMP_fprintf(InfoAnnotFile, "ARG%zu %u ", ArgIndex, this->InArgTypes[ArgIndex]);
		}
		SMP_fprintf(InfoAnnotFile, "\n");
	}

	// Memory-monitoring safety classification for the whole function.
	SMP_fprintf(AnnotFile, "%18llx %6d FUNC MMSAFENESS ", (unsigned long long) this->GetStartAddr(), 0);
	if (!IsSpecSafe())
		SMP_fprintf(AnnotFile, "UNSAFE\n");
	else if (!IsSafe())
		SMP_fprintf(AnnotFile, "SPECSAFE\n");
	else {
		assert(IsSafe());
		SMP_fprintf(AnnotFile, "SAFE\n");
	}

	// If function has problems that limited our analyses, emit an information annotation so that
	//  other tools can be aware of which analyses will be sound.
	if (FuncHasProblems) {
		SMP_fprintf(InfoAnnotFile,	"%18llx %6zu FUNC PROBLEM %s ", (unsigned long long) this->GetStartAddr(),
			this->Size, this->GetFuncName());
		if (!this->AnalyzedSP) {
			SMP_fprintf(InfoAnnotFile, "STACKANALYSIS ");
		}
		if (this->HasSharedChunks()) {
			SMP_fprintf(InfoAnnotFile, "CHUNKS ");
		}
		if (this->HasUnresolvedIndirectJumps()) {
			SMP_fprintf(InfoAnnotFile, "JUMPUNRESOLVED ");
		}
		if (this->HasUnresolvedIndirectCalls()) {
			SMP_fprintf(InfoAnnotFile, "CALLUNRESOLVED ");
		}
		if (!this->HasGoodRTLs()) {
			SMP_fprintf(InfoAnnotFile, "BADRTLS ");
		}
		SMP_fprintf(InfoAnnotFile, "\n");
	}

	// Print annotations identifying pointer args to vulnerable library
	//  functions (e.g. strcpy()) that trace back to malloc(size) where
	//  size is constant, which permits run-time instrumentation or
	//  transformation (e.g. transforming strcpy() to strncpy(), strcat()
	//  to strncat(), etc.)
	if (!this->BufferCallConstBufferArgsMap.empty()) {
		STARSBufferCallArgMap::const_iterator BufferArgsIter;
		for (BufferArgsIter = this->BufferCallConstBufferArgsMap.cbegin(); BufferArgsIter != this->BufferCallConstBufferArgsMap.cend(); ++BufferArgsIter) {
			STARS_ea_t LibCallAddr = BufferArgsIter->first;
			list<pair<size_t, STARS_uval_t> >::const_iterator ArgIter;
			SMPInstr *LibCallInst = this->GetInstFromAddr(LibCallAddr);
			// Put all constant buffer size ptrargs on same annotation line
			SMP_fprintf(InfoAnnotFile, "%18llx %6zu CALL BUFFERSIZE ARGS ", (uint64_t) LibCallAddr,
				LibCallInst->GetSize());
			for (ArgIter = BufferArgsIter->second.cbegin(); ArgIter != BufferArgsIter->second.cend(); ++ArgIter) {
				size_t ArgPos = ArgIter->first;
				STARS_uval_t BufSize = ArgIter->second;
				SMP_fprintf(InfoAnnotFile, "%3zu %llu ", ArgPos, (uint64_t) BufSize);
			}
			SMP_fprintf(InfoAnnotFile, "ZZ %s\n", LibCallInst->GetDisasm());
		}
	}

	// Reduced-analysis mode: emit the minimal per-instruction annotations, then free memory.
	if (!global_STARS_program->ShouldSTARSPerformFullAnalysis()) {
		list<SMPInstr *>::iterator InstIter = Instrs.begin();
#if SMP_USE_SSA_FNOP_MARKER
		++InstIter;  // skip marker instruction
#endif
		bool AllocSeen = false; // Reached LocalVarsAllocInstr yet?
		bool DeallocTrigger = false;
		for ( ; InstIter != Instrs.end(); ++InstIter) {
			SMPInstr *CurrInst = (*InstIter);
			STARS_ea_t addr = CurrInst->GetAddr();
			if (CurrInst->MDIsFloatNop()) {
				SMP_msg("WARNING: FloatNop not used as marker instruction at %llx\n", (uint64_t) addr);
			}
			SMP_fprintf(AnnotFile, "%18llx %6zu INSTR BELONGTO %llx \n",
				(unsigned long long) addr, CurrInst->GetSize(), (unsigned long long) GetStartAddr());
			if (this->LocalVarsAllocInstr == addr) {
				AllocSeen = true;
				if (this->NeedsStackReferent)
					this->EmitStackFrameAnnotations(AnnotFile, CurrInst);
				else {
					int OptType = CurrInst->GetOptType(); 
					if (5 == OptType) { // ADD or SUB
						// Prevent mmStrata from extending the caller's stack frame
						//  to include the new allocation.
						SMP_fprintf(AnnotFile, "%18llx %6d INSTR LOCAL SafeFrameAlloc %s \n",
							(unsigned long long) addr, -1, CurrInst->GetDisasm());
					}
					else if (CurrInst->MDIsPushInstr()) {
						SMP_fprintf(AnnotFile, "%18llx %6d INSTR LOCAL NoWarn %s \n",
							(unsigned long long) addr, -3, CurrInst->GetDisasm());
					}
					// mmStrata ignores the DATAREF annotations anyway, so even though
					//  they are not needed, emit them for use by Strata and other tools
					//  in other projects besides MEDS.
					this->EmitStackFrameAnnotations(AnnotFile, CurrInst);
				}
			}
			// If this is the instruction which deallocated space
			//  for local variables, we set a flag to remind us to 
			//  emit an annotation on the next instruction.
			// mmStrata wants the instruction AFTER the
			//  deallocating instruction, so that it processes
			//  the deallocation after it happens. It inserts
			//  instrumentation before an instruction, not
			//  after, so it will insert the deallocating
			//  instrumentation before the first POP of callee-saved regs,
			//  if there are any, or before the return, otherwise.
			if (addr == this->LocalVarsDeallocInstr) {
				DeallocTrigger = true;
			}
			else if (DeallocTrigger) { // Time for annotation
				SMP_fprintf(AnnotFile,	"%18llx %6lu DEALLOC STACK esp - %lu %s\n", (unsigned long long) addr,
					(unsigned long) this->GetLocalVarsSize(), (unsigned long) this->GetLocalVarsSize(), CurrInst->GetDisasm());
				DeallocTrigger = false;
			}
			CurrInst->EmitAnnotations(this->UseFP, AllocSeen, this->NeedsStackReferent, AnnotFile, InfoAnnotFile);

			// Emit annotations for IDA Pro switch table analyses.
			SMPitype CurrDataFlow = CurrInst->GetDataFlowType();
			if (CurrDataFlow == INDIR_JUMP) {
				if (!CurrInst->IsTailCall()) { // jmp [reg] is not a switch jump if it is a tail call.
					global_STARS_program->PrintAllCodeToCodeXrefs(addr, CurrInst->GetSize(), false, false);
				}
			}

		} // end for all instructions

		// Free memory for the memory-constrained reduced analysis case.
		for (InstIter = this->Instrs.begin(); InstIter != this->Instrs.end(); ++InstIter) {
			SMPInstr *CurrInst = (*InstIter);
			if (NULL != CurrInst) delete CurrInst;
		}

		list<SMPBasicBlock *>::iterator BlockIter;
		for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
			SMPBasicBlock *CurrBlock = (*BlockIter);
			if (NULL != CurrBlock) delete CurrBlock;
		}

		this->Instrs.clear();
		this->Blocks.clear();
		return;
	} // end if reduced analysis

	// Find and mark special cases that will affect the integer error and other security annotations.
	this->MarkSpecialNumericErrorCases();

	// Emit program CFG xref/indirect branch target annotations.
	if (this->FuncReturnsToCaller() && this->AreReturnTargetsComputed()) {
		this->EmitReturnTargetAnnotations();
	}
	else {
		if (!this->AreReturnTargetsComputed())
			SMP_msg("INFO: Return targets were not computed for func at %llx\n", (unsigned long long) this->GetFirstFuncAddr());
		else if (!this->FuncReturnsToCaller())
			SMP_msg("INFO: Return targets not computed for non-returning func at %llx\n", (unsigned long long) this->GetFirstFuncAddr());
	}

	// Loop through all instructions in the function.
	// Output optimization annotations for those
	//  instructions that do not require full computation
	//  of their memory metadata by the Memory Monitor SDT.
	list<std::size_t> LoopList; // for current block
	int CurrBlockNum = SMP_BLOCKNUM_UNINIT;
	list<SMPInstr *>::iterator InstIter = Instrs.begin();
#if SMP_USE_SSA_FNOP_MARKER
	++InstIter;  // skip marker instruction
#endif
	bool AllocSeen = false; // Reached LocalVarsAllocInstr yet?
	bool DeallocTrigger = false;
	bool PrefetchInstSeenLast = false; // inst before current inst was a prefetch
	bool UndefinedOpcodeSeenLast = false; // inst before current inst was an undefined opcode
	for (; InstIter != Instrs.end(); ++InstIter) {
		SMPInstr *CurrInst = (*InstIter);
		STARS_ea_t addr = CurrInst->GetAddr();
		if (CurrInst->MDIsFloatNop()) {
			SMP_msg("WARNING: FloatNop not used as marker instruction at %llx\n", (uint64_t) addr);
		}
		CurrBlock = CurrInst->GetBlock();
		int BlockNum = CurrBlock->GetNumber();
		// Recompute the loop nest list only when we cross a block boundary.
		if (BlockNum != CurrBlockNum) {
			CurrBlockNum = BlockNum;
			if (0 < this->LoopCount) {
				LoopList.clear();
				this->BuildLoopList(BlockNum, LoopList);
			}
		}

		SMP_fprintf(AnnotFile, "%18llx %6zu INSTR BELONGTO %llx \n",
			(unsigned long long) addr, CurrInst->GetSize(), (unsigned long long) GetStartAddr());
		// Emit indirect branch/call target xrefs for CFG tools.
		SMPitype CurrDataFlow = CurrInst->GetDataFlowType();
		bool IndirCallFlag = (CurrDataFlow == INDIR_CALL);
		if ((CurrDataFlow == INDIR_JUMP) || IndirCallFlag) {
			bool CompleteIndirCall = false;
			if (IndirCallFlag && (!this->ConstantIndirCalls.empty())) {
				CompleteIndirCall = (this->ConstantIndirCalls.find(addr) != this->ConstantIndirCalls.end());
			}
			global_STARS_program->PrintAllCodeToCodeXrefs(addr, CurrInst->GetSize(), IndirCallFlag, CompleteIndirCall);
		}

		if (this->LocalVarsAllocInstr == addr) {
			AllocSeen = true;
			if (this->NeedsStackReferent)
				this->EmitStackFrameAnnotations(AnnotFile, CurrInst);
			else {
				int OptType = CurrInst->GetOptType(); 
				if (5 == OptType) { // ADD or SUB
					// Prevent mmStrata from extending the caller's stack frame
					//  to include the new allocation.
					SMP_fprintf(AnnotFile, "%18llx %6d INSTR LOCAL SafeFrameAlloc %s \n",
						(unsigned long long) addr, -1, CurrInst->GetDisasm());
				}
				else if (CurrInst->MDIsPushInstr()) {
					SMP_fprintf(AnnotFile, "%18llx %6d INSTR LOCAL NoWarn %s \n",
						(unsigned long long) addr, -3, CurrInst->GetDisasm());
				}
				// mmStrata ignores the DATAREF annotations anyway, so even though
				//  they are not needed, emit them for use by Strata and other tools
				//  in other projects besides MEDS.
				this->EmitStackFrameAnnotations(AnnotFile, CurrInst);
			}
		}
		// If this is the instruction which deallocated space
		//  for local variables, we set a flag to remind us to 
		//  emit an annotation on the next instruction.
		// mmStrata wants the instruction AFTER the
		//  deallocating instruction, so that it processes
		//  the deallocation after it happens. It inserts
		//  instrumentation before an instruction, not
		//  after, so it will insert the deallocating
		//  instrumentation before the first POP of callee-saved regs,
		//  if there are any, or before the return, otherwise.
		if (addr == this->LocalVarsDeallocInstr) {
			DeallocTrigger = true;
		}
		else if (DeallocTrigger) { // Time for annotation
			SMP_fprintf(AnnotFile,	"%18llx %6lu DEALLOC STACK esp - %lu %s\n", (unsigned long long) addr,
				(unsigned long) this->GetLocalVarsSize(), (unsigned long) this->GetLocalVarsSize(), CurrInst->GetDisasm());
			DeallocTrigger = false;
		}

#ifndef SMP_REDUCED_ANALYSIS
		// Full type/integer-error annotations require sound stack and RTL analyses.
		if (this->StackPtrAnalysisSucceeded() && this->HasGoodRTLs() && !this->HasUnresolvedIndirectJumps()) {
			CurrInst->EmitTypeAnnotations(this->UseFP, AllocSeen, this->NeedsStackReferent, AnnotFile, InfoAnnotFile);
#ifdef STARS_EMIT_NUMERIC_ERROR_ANNOTS
			CurrInst->EmitIntegerErrorAnnotations(InfoAnnotFile, LoopList);
#endif
		}
		else
#endif
			CurrInst->EmitAnnotations(this->UseFP, AllocSeen, this->NeedsStackReferent, AnnotFile, InfoAnnotFile);

		if (CurrInst->HasReturnOpcode() && this->GetReturnAddressStatus() == FUNC_SAFE)
			CurrInst->EmitSafeReturn(AnnotFile);

		// Emit IBT annotations for instructions that fit computed-goto patterns in libc/glibc, such
		//  as prefetch instructions and the instructions that follow them (computed goto often chooses
		//  between going to the prefetch or jumping just past it, and IDA Pro cannot analyze these libc
		//  macro-generated computed gotos even if they are not orphaned code). Likewise, an undefined opcode
		//  often separates an indirect jump and its first target, so inst after undefined opcode is IBT.
		bool EmitIBTAnnotation = (PrefetchInstSeenLast || UndefinedOpcodeSeenLast);
		if (CurrInst->MDIsPrefetchOpcode()) {
			PrefetchInstSeenLast = true;
			UndefinedOpcodeSeenLast = false;
			EmitIBTAnnotation = true;
		}
		else if (CurrInst->MDIsUndefinedOpcode()) {
			UndefinedOpcodeSeenLast = true;
			PrefetchInstSeenLast = false;
		}
		else {
			PrefetchInstSeenLast = false;
			UndefinedOpcodeSeenLast = false;
		}
		if (EmitIBTAnnotation) {
			global_STARS_program->PrintUnknownCodeXref(addr, CurrInst->GetSize(), ZST_COMPUTEDGOTO);
		}
	}  // end for all instructions

	// Loop through all basic blocks and emit profiling request annotations
	//  for those blocks that have unsafe memory writes in them.
	this->SafeBlocks = 0;
	this->UnsafeBlocks = 0;
	for (BlockIter = this->Blocks.begin(); BlockIter != this->Blocks.end(); ++BlockIter) {
		CurrBlock = (*BlockIter);
		if (CurrBlock->MaybeAliasedWrite()) {
			++(this->UnsafeBlocks);
#if SMP_OPTIMIZE_BLOCK_PROFILING
			vector<SMPInstr *>::iterator CurrInst;
			CurrInst = CurrBlock->GetFirstInst();
			if ((*CurrInst)->IsMarkerInst())
				++CurrInst;
			STARS_ea_t addr = (*CurrInst)->GetAddr();
			SMP_fprintf(AnnotFile,	"%18llx %6d BLOCK PROFILECOUNT %s\n", (unsigned long long)addr,
				(*CurrInst)->GetSize(), (*CurrInst)->GetDisasm());
#endif
		}
		else {
			++(this->SafeBlocks);
		}
	}
	
	if (global_STARS_program->ShouldSTARSShadowFuncPtrs() || global_STARS_program->ShouldSTARSMaximizeCFGImprovement()) {
		// Emit func pointer shadowing and/or CFG-improving annotations.
		this->EmitFuncPtrShadowingAnnotations2(InfoAnnotFile);
	}
	if (global_STARS_program->ShouldSTARSShadowCriticalArgs()) {
		// Emit shadowing annotations.
		this->EmitArgShadowingAnnotations(InfoAnnotFile);
	}

	LoopList.clear();

	return;
} // end of SMPFunction::EmitAnnotations()

// Emit Indirect Branch Target destinations for return instructions in this func.
void SMPFunction::EmitReturnTargetAnnotations(void) {
	std::bitset<1 + MD_LAST_REG_NO> ReturnDeadRegs;
	bool RetDeadRegsComputed = false;

	if (!this->ReturnTargets.empty()) {
		bool ReturnProcessed = false;
		bool CompleteTargets = true;
		bool OrphanCodeProblem = (this->IsCalledFromOrphanedCode() || this->IsTailCallChainFromOrphanedCode());
		for (size_t BlockIndex = 0; BlockIndex < this->RPOBlocks.size(); ++BlockIndex) {
			SMPBasicBlock *CurrBlock = this->RPOBlocks[BlockIndex];
			if (CurrBlock->HasReturn()) {
				vector<SMPInstr *>::reverse_iterator LastInstIter = CurrBlock->GetRevInstBegin();
				SMPInstr *ReturnInst = (*LastInstIter);
				STARS_ea_t ReturnInstAddr = ReturnInst->GetAddr();
				if (!ReturnInst->HasReturnOpcode()) {
					continue; // must be tail call with RETURN data flow type
				}
				size_t RetInstSize = 1;
				bool success = (!ReturnTargets.empty());
				if (success)
					ReturnProcessed = true;
				set<STARS_ea_t>::const_iterator RetTargIter;
				for (RetTargIter = this->ReturnTargets.cbegin(); RetTargIter != this->ReturnTargets.cend(); ++RetTargIter) {
					STARS_ea_t RetTargetAddr = (*RetTargIter);
					bool TailCallFlag = (this->TailReturnTargets.find(RetTargetAddr) != this->TailReturnTargets.end());
					success &= global_STARS_program->PrintReturnInstXref(ReturnInstAddr, RetTargetAddr, RetInstSize, TailCallFlag);
				}
				if (success && (!(this->IsPossibleIndirectCallTarget() || this->MultipleEntryPoints || OrphanCodeProblem))) {
					global_STARS_program->PrintCodeToCodeXrefComplete(ReturnInstAddr, RetInstSize, this->ReturnTargets.size(), ZST_RETURN);
					++STARS_ReturnSetComplete;
					if (!RetDeadRegsComputed) {
						// Compute intersection of dead regs bitsets for each return target.
						for (set<STARS_ea_t>::const_iterator RetTargIter = this->ReturnTargets.cbegin(); RetTargIter != this->ReturnTargets.cend(); ++RetTargIter) {
							STARS_ea_t RetTargetAddr = (*RetTargIter);
							STARS_Function_t *FuncInfo = SMP_get_func(RetTargetAddr);
							assert(NULL != FuncInfo);
							// Get first addr in func and map to SMPFunction *.
							STARS_ea_t FirstAddrOfCaller = FuncInfo->get_startEA();
							SMPFunction *CallingFunc = this->GetProg()->FindFunction(FirstAddrOfCaller);
							assert(nullptr != CallingFunc);
							SMPInstr *RetTargetInst = CallingFunc->GetInstFromAddr(RetTargetAddr);
							assert(nullptr != RetTargetInst);
							if (!RetDeadRegsComputed) {
								ReturnDeadRegs |= RetTargetInst->GetDeadRegsSet();
								RetDeadRegsComputed = true;
							}
							else {
								ReturnDeadRegs &= RetTargetInst->GetDeadRegsSet();
							}
						}
					}
					ReturnInst->SetDeadRegsSet(ReturnDeadRegs);
				}
				else {
					CompleteTargets = false;
					++STARS_ReturnSetIncomplete;
					if (!success)
						++STARS_ReturnSetIncompleteAddrRange;
					if (this->IsPossibleIndirectCallTarget())
						++STARS_ReturnSetIncompleteAddrTaken;
					if (this->MultipleEntryPoints)
						++STARS_ReturnSetIncompleteMultiEntry;
					if (this->IsCalledFromOrphanedCode())
						++STARS_ReturnSetIncompleteCalledFromOrphanCode;
					if (this->IsTailCallChainFromOrphanedCode())
						++STARS_ReturnSetIncompleteTailCallChainFromOrphanCode;
				}
			}
		} // end for all basic blocks in function
		if (CompleteTargets) {
			if (ReturnProcessed)
				++STARS_FuncReturnSetComplete;
		}
		else {
			++STARS_FuncReturnSetIncomplete;
		}
	}
	return;
} // end of SMPFunction::EmitReturnTargetAnnotations()

// return -1 if CopyInstAddr not found in InArgPointerCopyAddrs, InArg position # otherwise
int SMPFunction::FindInArgNumFromCopyAddr(STARS_ea_t CopyInstAddr) {
	// Scan each global name's set of pointer-copy addresses for CopyInstAddr.
	for (size_t GlobalNameIndex = 0; GlobalNameIndex < this->InArgPointerCopyAddrs.size(); ++GlobalNameIndex) {
		const set<STARS_ea_t> &CopyAddrSet = this->InArgPointerCopyAddrs[GlobalNameIndex];
		if (CopyAddrSet.find(CopyInstAddr) == CopyAddrSet.cend())
			continue; // not recorded under this global name index
		// Found it; GlobalNameIndex identifies the name. Map it to an InArg position if one was recorded.
		map<unsigned int, size_t>::const_iterator MapIter = this->GlobalNameIndexMapToInArgIndex.find((unsigned int) GlobalNameIndex);
		if (MapIter == this->GlobalNameIndexMapToInArgIndex.cend())
			return -1; // copy addr known, but no InArg mapping recorded
		return (int) MapIter->second;
	}
	return -1; // CopyInstAddr not found in any copy-address set
} // end of SMPFunction::FindInArgNumFromCopyAddr()

// Debug output dump: prints dominator info, global names, control flow types,
//  loop membership, induction variables, and then each basic block in turn.
void SMPFunction::Dump(void) {
	list<SMPBasicBlock *>::iterator CurrBlock;
	SMP_msg("Debug dump for function: %s\n", this->GetFuncName());
	SMP_msg("UseFP: %d  LocalVarsAllocInstr: %llx Block: %d\n", this->UseFP,
		(uint64_t) this->LocalVarsAllocInstr, this->GetBlockFromInstAddr(this->LocalVarsAllocInstr)->GetNumber());
	// Immediate dominator of each block, indexed by block number.
	for (std::size_t index = 0; index < this->IDom.size(); ++index) {
		SMP_msg("IDOM for %zu: %d\n", index, this->IDom.at(index));
	}
	// Dominator tree: children of each block in the dominance hierarchy.
	for (std::size_t index = 0; index < this->DomTree.size(); ++index) {
		SMP_msg("DomTree for %zu: ", index);
		list<int>::iterator DomIter;
		for (DomIter = this->DomTree.at(index).second.begin();
			DomIter != this->DomTree.at(index).second.end();
			++DomIter) {
				SMP_msg("%d ", *DomIter);
		}
		SMP_msg("\n");
	}
	SMP_msg("Global names: \n");
	STARSOpndSetIter NameIter;
	for (NameIter = this->GlobalNames.begin(); NameIter != this->GlobalNames.end(); ++NameIter) {
		SMP_msg("index: %u ", ExtractGlobalIndex(*NameIter));
		PrintListOperand(*NameIter);
		SMP_msg("\n");
	}
	SMP_msg("Blocks each name is defined in: \n");
	for (std::size_t index = 0; index < this->BlocksDefinedIn.size(); ++index) {
		SMP_msg("Name index: %zu Blocks: ", index);
		list<int>::iterator BlockIter;
		for (BlockIter = this->BlocksDefinedIn.at(index).begin();
			BlockIter != this->BlocksDefinedIn.at(index).end();
			++BlockIter) {
			SMP_msg("%d ", *BlockIter);
		}
		SMP_msg("\n");
	}
	SMP_msg("Control flow types: \n");
	for (map<STARS_ea_t, uint16_t>::const_iterator CFIter = this->ControlFlowMap.cbegin(); CFIter != this->ControlFlowMap.cend(); ++CFIter) {
		map<STARS_ea_t, int>::iterator MapIter = this->JumpFollowNodesMap.find(CFIter->first);
		if (MapIter != this->JumpFollowNodesMap.end()) { // found it
			ControlFlowType LastCFType = (ControlFlowType)CFIter->second;
			// Only the structured-branch types carry a meaningful follow block number.
			if ((LastCFType == BRANCH_IF_THEN) || (LastCFType == BRANCH_IF_THEN_ELSE) || (LastCFType == SHORT_CIRCUIT_BRANCH)) {
				int FollowBlockNum = MapIter->second;
				SMP_msg("Branch addr: %llx  CFType: %s Block: %d FollowBlock: %d\n", 
					(uint64_t) CFIter->first, CFTTypeStrings[CFIter->second], this->GetBlockFromInstAddr(CFIter->first)->GetNumber(), FollowBlockNum);
			}
		}
		else { // loop branch
			SMP_msg("Branch addr: %llx  CFType: %s Block: %d\n", (uint64_t) CFIter->first, CFTTypeStrings[CFIter->second], this->GetBlockFromInstAddr(CFIter->first)->GetNumber());
		}
	}

	if (this->LoopCount > 0) {
		SMP_msg("Block numbers by loop number:\n");
		for (size_t LoopNum = 0; LoopNum < this->LoopCount; ++LoopNum) {
			// BUGFIX: LoopNum is size_t, so use %zu (was %d, undefined behavior on LP64).
			SMP_msg("Loop %zu : ", LoopNum);
			for (size_t BlockNum = 0; BlockNum < (size_t)this->BlockCount; ++BlockNum) {
				if (this->FuncBlocksByLoop[LoopNum].GetBit(BlockNum)) {
					SMP_msg(" %zu", BlockNum);
				}
			}
			if (!this->LoopFollowNodes.empty()) {
				SMP_msg(" Follow block number: %d", this->LoopFollowNodes[LoopNum]);
			}
			SMP_msg("\n");
		}
		if (!this->LoopInductionVars.empty()) {
			SMP_msg("Induction vars by loop number:\n");
			for (size_t LoopNum = 0; LoopNum < this->LoopCount; ++LoopNum) {
				STARSInductionVarFamilyList IV = this->LoopInductionVars[LoopNum];
				for (STARSInductionVarFamilyList::const_iterator IVIter = IV.cbegin(); IVIter != IV.cend(); ++IVIter) {
					if ((*IVIter).BIVIncomingSSANum >= 0) {
						// BUGFIX: LoopNum is size_t, so use %zu (was %d).
						SMP_msg("\nLoop %zu IVFamily: ", LoopNum);
						DumpInductionVarFamily(*IVIter);
					}
				}
			}
			SMP_msg("\n");
		}
	}
	for (CurrBlock = this->Blocks.begin(); CurrBlock != this->Blocks.end(); ++CurrBlock) {
		// Dump out the block number and data flow sets before the instructions.
		(*CurrBlock)->Dump();
	}
	SMP_msg("End of debug dump for function: %s\n", this->GetFuncName());
	return;
} // end of SMPFunction::Dump()

#define STARS_DEBUG_DUPLICATE_SEARCHES 1

// Is DefOp+DefSSANum at DefAddr used as address reg or as source operand in memory write?
bool SMPFunction::IsDefUsedInUnsafeMemWrite(STARSOpndTypePtr DefOp, int DefSSANum, STARS_ea_t DefAddr) {
	bool UnsafeUseFound = false;

	// Worklist of (operand, SSA#) items plus their defining addresses, seeded with the initial def.
	//  The per-block search may append additional items for chained defs.
	list<pair<pair<STARSOpndTypePtr, int>, STARS_ea_t> > WorkList;
	WorkList.push_back(make_pair(make_pair(DefOp, DefSSANum), DefAddr));
	// Guard set of (DefAddr, name/SSA hash) pairs already searched, so that a data
	//  dependence loop cannot cause duplicate searches.
	set<pair<STARS_ea_t, int>, LessSearchOperand> SearchedItems;

	while (!UnsafeUseFound && !WorkList.empty()) {
		pair<pair<STARSOpndTypePtr, int>, STARS_ea_t> CurrItem = WorkList.front();
		WorkList.pop_front();
		DefOp = CurrItem.first.first;
		DefSSANum = CurrItem.first.second;
		DefAddr = CurrItem.second;
		assert(!DefOp->IsMemOp());

		if (MDIsStackPtrReg(DefOp->GetReg(), this->UsesFramePointer()))
			continue; // the stack/frame pointer register itself is not traced

		int NameSSAHash = HashGlobalNameAndSSA(DefOp, DefSSANum);
		if (SearchedItems.insert(make_pair(DefAddr, NameSSAHash)).second) {
			// Not previously searched: find the defining block and start the recursion there.
			SMPBasicBlock *DefBlock = this->GetBlockFromInstAddr(DefAddr);
			assert(NULL != DefBlock);
			this->ResetProcessedBlocks();
			UnsafeUseFound = DefBlock->IsDefUsedInUnsafeMemWrite(WorkList, DefOp, DefSSANum, DefAddr);
		}
#if STARS_DEBUG_DUPLICATE_SEARCHES
		else {
			SMP_msg("INFO: Duplicate search avoided at %llx for DefHashIndex %x\n", (unsigned long long) DefAddr, NameSSAHash);
		}
#endif
	}

	WorkList.clear();
	SearchedItems.clear();
	return UnsafeUseFound;
} // end of SMPFunction::IsDefUsedInUnsafeMemWrite()

// Can the return address of any caller be read or written directly from this function?
bool SMPFunction::IsCallerReturnAddressReadOrWritten(void) {
	set<STARS_ea_t>::iterator CallerIter;
	bool CallerVulnerable = false;
	for (CallerIter = this->AllCallSources.begin(); CallerIter != this->AllCallSources.end(); ++CallerIter) {
		STARS_ea_t CallerFirstAddr = (*CallerIter); // we store first addrs of funcs that call us, not the call inst addrs, in AllCallSources.
		SMPFunction *CallerFunc = this->GetProg()->FindFunction(CallerFirstAddr);
		if ((nullptr != CallerFunc) && CallerFunc->StackPtrAnalysisSucceeded()) {
			if (CallerFunc->GetLocalVarsSize() < ((STARS_asize_t) this->GetMaxDirectStackAccessDelta())) {
				// We are reading or writing beyond the caller stack frame, perhaps just to
				//  the saved regs above the caller stack frame, but maybe to the caller's
				//  return address.
				SMP_msg("INFO: Func at %llx accesses above the local vars of caller func at %llx\n", 
					(unsigned long long) this->GetFirstFuncAddr(), (unsigned long long) CallerFunc->GetFirstFuncAddr());
				CallerVulnerable = true;
			}
		}
	}
	return CallerVulnerable;
} // end of SMPFunction::IsCallerReturnAddressReadOrWritten()

// Is UseOp+UseSSANum at UseAddr a safe write?
bool SMPFunction::IsAddressRegSafe(const STARSOpndTypePtr &UseOp, STARS_ea_t UseAddr, int UseSSANum) {
	bool SafeReg = false;
	if (!this->HasGoodSSAForm() || (0 > UseSSANum))
		return SafeReg;

	bool GlobalName = this->IsGlobalName(UseOp);
	SMPBasicBlock *UseBlock = this->GetBlockFromInstAddr(UseAddr);
	STARSOpndTypePtr DefMoveOp = nullptr;
	STARS_ea_t DefAddr = UseBlock->GetUltimateDefAddr(UseOp, UseAddr, UseSSANum, !GlobalName, true, false, DefMoveOp);

	// To be safe, the UseAddr needs to track back to a move instruction that is a stack pointer copy,
	//  or a load effective address using a direct stack address, or an incoming arg or incoming reg
	//  that can be similarly traced to a direct stack address in all callers.
	bool LiveIntoFunc = STARS_IsSSAMarkerPseudoID(DefAddr);
	bool PhiDef = STARS_IsBlockNumPseudoID(DefAddr);
	bool BadDefAddr = (STARS_BADADDR == DefAddr);
	bool LiveIntoBlock = STARS_IsLiveInPseudoID(DefAddr);
	if (LiveIntoFunc) {
		if (!(this->PossibleIndirectCallTarget || this->MultipleEntryPoints)) {
			// TODO: Search all callers to analyze their uses of DefOp.
			;
		}
	}
	else if (!(PhiDef || BadDefAddr || LiveIntoBlock)) {
		SMPInstr *DefInst = this->GetInstFromAddr(DefAddr);
		assert(NULL != DefInst);
		bool StackPtrCopy = DefInst->MDIsStackPointerCopy(this->UsesFramePointer());
		if (StackPtrCopy) {
			// Safe unless we have "lea reg,[esp+ecx]" so search for index reg
			STARSOpndTypePtr LeaMemOp = DefInst->GetLeaMemUseOp();
			if ((nullptr != LeaMemOp) && (!LeaMemOp->IsVoidOp())) {
				if (MDIsDirectStackAccessOpnd(LeaMemOp, this->UsesFramePointer())) {
					SafeReg = true;
				}
			}
			else {
				// Not lea inst; safe stack pointer copy
				SafeReg = true;
			}
		}
	}

	return SafeReg;
} // end of SMPFunction::IsAddressRegSafe()

// Analyzes the function to see if the return address can be marked as safe.
// Sets the safety/frame flags (FuncSafe, SpecFuncSafe, NeedsFrame, SpecNeedsFrame,
//  ReturnAddressStatus) and the fast-return unsafety reasons, based on how the
//  stack pointer and memory writes are used throughout the function.
void SMPFunction::MarkFunctionSafe() {
#if SMP_DEBUG_FUNC
	SMP_msg(" Analyzing function %s and isLeaf = %d \n ", this->GetFuncName(), this->IsLeaf());
#endif
	bool HasCallTargets = false;
	bool HasStackPointerCopy = false;
	bool HasStackPointerPush = false;
	bool HasIndirectGlobalWrite = false;
	bool WritesAboveLocalFrame = false;		// Direct writes above local frame
	bool WritesAboveLocalFrameIndirect = false;	// Indirect writes above local frame
	bool HasIndexedStackWrite = false;
	bool IsIndirectCallTarget = this->PossibleIndirectCallTarget; // could be called indirectly
	bool IsTailCallTarget = false; // could be called by jump instruction used as tail call
	bool MakesTailCall = false; // problem for Fast Returns, not for return address safety
	bool HasNoCallers = this->AllCallSources.empty();

	if (this->HasGoodRTLs()) {
		this->SetReturnAddressStatus(FUNC_SAFE);
		this->SetFuncSafe(true);
	}
	else { // don't process instructions if we have any bad RTLs
		this->SetReturnAddressStatus(FUNC_UNSAFE);
		this->SetUnsafeForFastReturns(true, UNSAFE_RETURN_ADDRESS);
		this->SetFuncSafe(false);
		this->SetSpecFuncSafe(false);
		this->SetNeedsFrame(true);
		this->SetSpecNeedsFrame(true);
		return;
	}

	if (!this->AllCallTargets.empty()) {
		HasCallTargets = true;
	}

#if SMP_USE_SWITCH_TABLE_INFO
	if (this->HasUnresolvedIndirectJumps()) {
#else
	if (this->IndirectJumps) {
#endif
#if SMP_DEBUG_FUNC
		SMP_msg("Function %s marked as unsafe due to indirect jumps\n", this->GetFuncName());
#endif
	}

	STARS_Function_t *FuncPtr = SMP_get_func(this->GetStartAddr());
	if (NULL == FuncPtr) {
		SMP_msg("FATAL ERROR in SMPFunction::MarkFunctionSafe; no func ptr\n");
	}
	assert(NULL != FuncPtr);

	// Call analyzed for its out-params only; the bool result is not needed here.
	(void) FuncPtr->AnalyzeInstAsCallTarget(this, IsIndirectCallTarget, IsTailCallTarget);
	STARS_ea_t FirstAddr = this->GetFirstFuncAddr();

	if (!IsIndirectCallTarget) {
		IsIndirectCallTarget = this->GetProg()->IsCodeXrefFromData(FirstAddr);
	}

	this->PossibleIndirectCallTarget |= IsIndirectCallTarget;
	this->PossibleTailCallTarget = IsTailCallTarget;

	this->DetectMultiEntryFunction();
	bool SearchForCodeAddressesTaken = (!(this->PossibleIndirectCallTarget || this->MultipleEntryPoints));
	bool NonLoopingLeafFunc = (this->IsLeaf() && (0 == this->LoopCount) && this->HasGoodSSAForm());

	list<SMPInstr *>::iterator Instructions = Instrs.begin();
	SMPInstr *CurrInst;
#if SMP_USE_SSA_FNOP_MARKER
	++Instructions;  // skip marker instruction
#endif

	// While processing the stack pointer writes, the prologue code for
	//  saving the frame pointer register and allocating local variables needs to be
	//  handled.
	bool SaveEBP = false;
	bool XferESPtoEBP = false;
	for ( ; Instructions != Instrs.end(); ++Instructions) {
		CurrInst = (*Instructions);
		STARS_ea_t address = CurrInst->GetAddr();
		// If we do not already have a MultiEntryFunction determination, search
		//  for addresses beyond the entry point that have their address taken.
		// We exclude code addresses found in jump tables.
		bool AddrFoundInJumpTable = (this->JumpTableTargets.find(address) != this->JumpTableTargets.end());
		if (SearchForCodeAddressesTaken && (address != FirstAddr) && (!AddrFoundInJumpTable)) {
			// Could optimize this later by searching the CodeAddressesTaken set
			//  for the range of addresses in this function.
			if (global_STARS_program->IsCodeAddressTaken(address)) {
				this->MultipleEntryPoints = true;
				SearchForCodeAddressesTaken = false; // don't need to look for more
				SMP_msg("INFO: Func at %llx becoming multi-entry because code addr %llx found in data.\n",
					(unsigned long long) FirstAddr, (unsigned long long) address);
			}
			else if (this->GetProg()->IsCodeXrefFromData(address)) {
				this->MultipleEntryPoints = true;
				SearchForCodeAddressesTaken = false; // don't need to look for more
				SMP_msg("INFO: Func at %llx becoming multi-entry because code addr %llx found in CodeXrefsFromData.\n",
					(unsigned long long) FirstAddr, (unsigned long long) address);
			}
		}

		if (!CurrInst->IsAnalyzeable())
			continue;
#if SMP_VERBOSE_DEBUG_FUNC 
		SMP_msg(" Total number of defs for this instruction %d\n", CurrInst->NumDefs());
#endif
		if (!SaveEBP) { // still looking for "push ebp"
			if (CurrInst->MDIsPushInstr() && CurrInst->RegOpndMatches(0, MD_FRAME_POINTER_REG)) {
				SaveEBP = true;
				continue;
			}
		}
		else if (!XferESPtoEBP) { // found "push ebp", looking for "mov ebp,esp"
			if ((CurrInst->GetIDAOpcode() == STARS_NN_mov)
					&& (CurrInst->GetFirstDef()->GetOp()->MatchesReg(MD_FRAME_POINTER_REG))
					&& (CurrInst->GetFirstUse()->GetOp()->MatchesReg(MD_STACK_POINTER_REG))) {
				XferESPtoEBP = true;
				continue;
			}
		}
		if ((CurrInst->IsTailCall()) || (CurrInst->IsCondTailCall())) {
			// Moved up here because a tail call can be the point at which the stack
			//  frame returns to its prior state, making it the DeallocInstr.
			MakesTailCall = true;
		}
		if (address == this->LocalVarsAllocInstr ||	address == this->LocalVarsDeallocInstr) {
			continue;
		}

		if (CurrInst->MDIsStackPointerCopy(this->UseFP)) {
			// We have four cases for the register DEF that receives the stack pointer copy.
			// 1. The DEF is used (as a source reg or as an address reg) in an unsafe memory write.
			// 2. The DEF is live-in to an indirect call; who knows how it is used in the callee.
			// 3. The DEF is live-in to a direct call.
			// 4. None of the above; DEF does not reach any call or unsafe memory write.
			// Cases 1 and 2 are unsafe. We could be writing into the stack frame on top of a return
			//  address, unless we have more precise analysis of memory writes.
			// Case 3: DEF will either be used unsafely in the direct callee or not. We want to 
			//  leave our current status as FUNC_SAFE_IF_CALLEES_SAFE. Unsafe writes in the callee
			//  will be detected when the callee is analyzed.
			// Case 4 is safe.
			// As a result, we make our IsDefUsedInUnsafeMemWrite() search method return true for
			//  both case 1 and case 2 (conservatively assuming that indirect callees use the DEF
			//  in memory writes).
			// If the stack pointer is copied into a memory location instead of a register, it is unsafe.
			set<DefOrUse, LessDefUse>::iterator DefIter = CurrInst->GetFirstNonFlagsDef();
			assert(DefIter != CurrInst->GetLastDef());
			STARSOpndTypePtr CopyOp = DefIter->GetOp();
			int DefSSANum = DefIter->GetSSANum();
			if ((CopyOp->IsRegOp()) && this->HasGoodSSAForm() && (!HasStackPointerCopy) && (!(this->IsDefUsedInUnsafeMemWrite(CopyOp, DefSSANum, address)))) {
				SMP_msg("INFO: Safe stack pointer copy at %llx\n", (unsigned long long) address);
			}
			else {
				HasStackPointerCopy = true;
				if (CurrInst->MDIsLoadEffectiveAddressInstr()) {
					// If an lea instruction loads an address above
					//  the stack frame, we must assume that writes
					//  above the stack frame could occur.
					STARSOpndTypePtr TempOp = CurrInst->GetLeaMemUseOp();
					if (this->AccessAboveLocalFrame(TempOp, CurrInst->AreDefsNormalized(), address, false))
						WritesAboveLocalFrameIndirect = true;
				}
#if 1
				SMP_msg(" Function %s marked as unsafe due to stack pointer copy ", this->GetFuncName());
				SMP_msg("%s %llx \n", CurrInst->GetDisasm(), (unsigned long long) CurrInst->GetAddr());
#endif
			}
		}
		if (CurrInst->MDIsPushInstr()) {
			// not exactly sure how to handle this instruction
			// for the moment if it's a push on a esp or usefp & ebp
			// mark as unsafe
			if (CurrInst->RegOpndMatches(0, MD_STACK_POINTER_REG) || 	 
					(this->UseFP && CurrInst->RegOpndMatches(0, MD_FRAME_POINTER_REG))) {
				HasStackPointerPush = true;
#if SMP_DEBUG_FUNC 
				SMP_msg(" Function %s marked as unsafe due to push on ebp or esp outside of function header \n", this->GetFuncName());	
				// BUGFIX: STARS_ea_t can be 64 bits; cast and use %llx (was %x), matching the message above.
				SMP_msg("%s %llx\n", CurrInst->GetDisasm(), (unsigned long long) CurrInst->GetAddr());
#endif
			}
			continue;
		}
		if (CurrInst->MDIsPopInstr() || CurrInst->HasReturnOpcode()) {
			// ignore pops and returns for the moment
			 continue;
		}
		if (CurrInst->HasDestMemoryOperand()) {
			STARSOpndTypePtr Operand = CurrInst->GetMemDef();
			// NOTE: the next four locals are used only by the disabled (#if 0) analyses below.
			int BaseReg;
			int IndexReg;
			uint16_t ScaleFactor;
			STARS_ea_t offset;
			if (Operand->IsStaticMemOp()) {
				HasIndirectGlobalWrite = CurrInst->HasIndirectMemoryWrite();
			}
			else if (Operand->IsMemDisplacementOp()) {
				if (MDIsStackAccessOpnd(Operand, this->UsesFramePointer())) {
					if (!CurrInst->HasIndirectMemoryWrite()) {
						bool tempWritesAboveLocalFrame = this->WritesAboveLocalFrame(Operand, CurrInst->AreDefsNormalized(), address);
						WritesAboveLocalFrame |= tempWritesAboveLocalFrame;
					}
					else {
						bool tempWritesAboveLocalFrameIndirect = this->IndexedWritesAboveLocalFrame(Operand);

						/* separate indirect writes to this frame from indirect writes to another frame */
						if (tempWritesAboveLocalFrameIndirect) {
							WritesAboveLocalFrameIndirect = true;
						}
						else {
							HasIndexedStackWrite = true;
						}
					}
				}
#if 0
				else if (!this->HasUnsafeIndirectWrites && (!CurrInst->IsWriteSafetyAnalyzed())) { 
					// not yet unsafe due to indirect writes; analyze
					bool LoopingWrite = CurrInst->MDHasAnyRepeatPrefix();
					MDExtractAddressFields(Operand, BaseReg, IndexReg, ScaleFactor, offset);
					if (!LoopingWrite && (ScaleFactor == 0) && (!((BaseReg != STARS_x86_R_none) && (IndexReg != STARS_x86_R_none)))) {
						// Non-scaled, base or index (implied by MemDisplacementOp()) but not both.
						STARS_regnum_t AddrReg = (STARS_regnum_t) BaseReg;
						if (IndexReg != STARS_x86_R_none) {
							AddrReg = (STARS_regnum_t) IndexReg;
						}
						AddrReg = MDCanonicalizeSubReg(AddrReg);
						STARSOpndTypePtr AddrRegOp = CurrInst->MakeRegOpnd(AddrReg);
						STARSDefUseIter UseIter = CurrInst->FindUse(AddrRegOp);
						assert(UseIter != CurrInst->GetLastUse());
						bool SafeAddrReg = this->IsAddressRegSafe(AddrRegOp, address, UseIter->GetSSANum());
						if (SafeAddrReg) {
							SMP_msg("INFO: Found safe indirect memory write at %llx\n", (unsigned long long) address);
						}
						else {
							this->HasUnsafeIndirectWrites = true;
						}
					}
					else {
						this->HasUnsafeIndirectWrites = true;
					}
				}
#else
				else if (!this->HasUnsafeIndirectWrites) {
					STARSDefUseIter DefIter = CurrInst->FindDef(Operand);
					assert(DefIter != CurrInst->GetLastDef());
					this->HasUnsafeIndirectWrites = (!DefIter->IsSafeMemWriteDef());
				}
#endif
			}
			else if (Operand->IsMemNoDisplacementOp()) {
				// so phrase is of the form [BASE_REG + IND ]
				// if the index register is missing just make sure that
				// the displacement is below stack frame top
				if (MDIsStackAccessOpnd(Operand, this->UsesFramePointer())) {
					if (!CurrInst->HasIndirectMemoryWrite()) {
						/* addressing mode is *esp or *ebp */
						continue;
					}
					else {
						HasIndexedStackWrite = true;
					}
				}
				else if (!this->HasUnsafeIndirectWrites) { // not yet unsafe due to indirect writes; analyze
#if 0
					if (!CurrInst->IsWriteSafetyAnalyzed()) {
						bool LoopingWrite = CurrInst->MDHasAnyRepeatPrefix();
						MDExtractAddressFields(Operand, BaseReg, IndexReg, ScaleFactor, offset);
						if (!LoopingWrite && (ScaleFactor == 0) && (!((BaseReg != STARS_x86_R_none) && (IndexReg != STARS_x86_R_none)))) {
							// Non-scaled, base or index (implied by MemNoDisplacementOp()) but not both.
							STARS_regnum_t AddrReg = (STARS_regnum_t)BaseReg;
							if (IndexReg != STARS_x86_R_none) {
								AddrReg = (STARS_regnum_t)IndexReg;
							}
							AddrReg = MDCanonicalizeSubReg(AddrReg);
							STARSOpndTypePtr AddrRegOp = CurrInst->MakeRegOpnd(AddrReg);
							STARSDefUseIter UseIter = CurrInst->FindUse(AddrRegOp);
							assert(UseIter != CurrInst->GetLastUse());
							bool SafeAddrReg = this->IsAddressRegSafe(AddrRegOp, address, UseIter->GetSSANum());
							if (SafeAddrReg) {
								SMP_msg("INFO: Found safe indirect memory write at %llx\n", (unsigned long long) address);
							}
							else {
								this->HasUnsafeIndirectWrites = true;
							}
						}
						else {
							this->HasUnsafeIndirectWrites = true;
						}
					}
#else
					STARSDefUseIter DefIter = CurrInst->FindDef(Operand);
					assert(DefIter != CurrInst->GetLastDef());
					this->HasUnsafeIndirectWrites = (!DefIter->IsSafeMemWriteDef());
#endif
				}
			}
			else {
				SMP_msg("FATAL ERROR: Unknown memory operand type in MarkFunctionSafe at %llx\n", (unsigned long long) address);
				assert(false);
			}
		} // end if HasMemDestOperand() in current instruction
		// Find instructions that take the address of a callee-saved reg, return address, or stack-inarg.
		if (CurrInst->MDIsLoadEffectiveAddressInstr()) {
			STARSOpndTypePtr LeaMemOp = CurrInst->GetLeaMemUseOp();
			if (MDIsDirectStackAccessOpnd(LeaMemOp, UseFP)) {
				// Exclude the case in which the stack pointer register is being adjusted with
				//  an lea opcode, e.g. lea esp,[ebp-k] deallocates the stack frame to prepare
				//  for popping the saved regs.
				STARSDefUseIter LeaDefIter = CurrInst->GetFirstNonFlagsDef();
				STARSOpndTypePtr LeaDefOp = LeaDefIter->GetOp();
				if (!LeaDefOp->MatchesReg(MD_STACK_POINTER_REG)) {
					StackAccessType AccessVal = this->GetStackAccessType(LeaMemOp, CurrInst->AreDefsNormalized(), address, false);
					FILE *AlarmFile = global_STARS_program->GetAlarmFile();
					if (STARS_STACK_CALLEE_SAVED_REG == AccessVal) {
						SMP_fprintf(AlarmFile, "%18llx %6d INSTR CALLEESAVEDREGADDRESSTAKEN \n", (uint64_t)address, CurrInst->GetSize());
					}
					else if (STARS_STACK_RETURN_ADDRESS == AccessVal) {
						SMP_fprintf(AlarmFile, "%18llx %6d INSTR RETURNADDRADDRESSTAKEN \n", (uint64_t)address, CurrInst->GetSize());
					}
					else if (STARS_STACK_INARG == AccessVal) {
						SMP_fprintf(AlarmFile, "%18llx %6d INSTR INARGADDRESSTAKEN \n", (uint64_t)address, CurrInst->GetSize());
					}
					else if (STARS_STACK_OUTARG == AccessVal) {
						SMP_fprintf(AlarmFile, "%18llx %6d INSTR OUTARGADDRESSTAKEN \n", (uint64_t) address, CurrInst->GetSize());
					}
					else if (STARS_STACK_LOCAL_FRAME == AccessVal) {
						SMP_fprintf(AlarmFile, "%18llx %6d INSTR LOCALVARADDRESSTAKEN \n", (uint64_t) address, CurrInst->GetSize());
					}
					else {
						assert(STARS_STACK_UNKNOWN == AccessVal);
						SMP_fprintf(AlarmFile, "%18llx %6d INSTR UNKNOWNSTACKADDRESSTAKEN \n", (uint64_t) address, CurrInst->GetSize());
					}
				}
			}
		}
	} // end for all instructions

	// We can reset this->HasUnsafeIndirectWrites for non-looping leaf functions.
	//  The only way to overwrite the return address in such a function is to directly
	//  write to it, which is detected in the AccessesReturnAddress boolean flag below.
	if (NonLoopingLeafFunc && this->HasUnsafeIndirectWrites) {
		this->HasUnsafeIndirectWrites = false;
		SMP_msg("INFO: Resetting HasUnsafeIndirectWrites for non-looping leaf func at %llx\n", (unsigned long long) this->GetFirstFuncAddr());
	}

	// For mmStrata bounds checking of the stack frame, we don't care
	//  about indirect writes unless they are to the stack.
	bool SpecUnsafe = (HasStackPointerCopy || HasStackPointerPush || HasIndexedStackWrite || this->HasSharedChunks()
		|| this->HasUnresolvedIndirectJumps());
	bool Unsafe = SpecUnsafe || this->HasUnresolvedIndirectCalls();

	this->SetFuncSafe(!Unsafe);
	this->SetSpecFuncSafe(!SpecUnsafe);

	unsigned short FastReturnStatus = this->GetFastReturnStatus();
#if 0
	bool AccessesReturnAddress = (0 != (FastReturnStatus & (RETURN_ADDRESS_WRITE | RETURN_ADDRESS_READ)));
#else  // reading the return address is not unsafe in itself
	bool AccessesReturnAddress = (0 != (FastReturnStatus & RETURN_ADDRESS_WRITE));
#endif
	if (WritesAboveLocalFrame) {
		// Detect safe and dangerous cases.
		if (!AccessesReturnAddress) {
			// No direct reads or writes of the return address.
			// See if we can read or write the return address of any caller.
			WritesAboveLocalFrame = this->IsCallerReturnAddressReadOrWritten(); // reset to false if safe
		}
	}

	this->WritesAboveRA = WritesAboveLocalFrameIndirect;  // currently unused
	this->SafeCallee = (!Unsafe) && (!WritesAboveLocalFrameIndirect) && this->AnalyzedSP && (!WritesAboveLocalFrame);
	this->SpecSafeCallee = (!SpecUnsafe) && (!WritesAboveLocalFrameIndirect) && this->AnalyzedSP && (!WritesAboveLocalFrame);
	this->SetNeedsFrame(Unsafe);
	this->SetSpecNeedsFrame(SpecUnsafe);

	this->HasIndirectWrites = this->HasIndirectWrites || this->HasUnsafeIndirectWrites || (HasIndexedStackWrite || WritesAboveLocalFrameIndirect || HasIndirectGlobalWrite);

#if 1
	bool UnsafeReturnAddr = (Unsafe || AccessesReturnAddress || this->HasUnsafeIndirectWrites || (!this->AnalyzedSP) || this->MultipleEntryPoints);
#else
	bool UnsafeReturnAddr = (Unsafe || AccessesReturnAddress || WritesAboveLocalFrameIndirect || HasIndirectGlobalWrite
		|| HasIndirectWrite || (!this->AnalyzedSP) || this->MultipleEntryPoints);
#endif
	// We have conditions that cause fast returns to be unsafe even though the return address is safe.
	if (this->PossibleIndirectCallTarget) {
		SMP_msg("INFO: Function at %llx becoming unsafe for fast returns because it is indirect call target.\n", (unsigned long long) this->GetFirstFuncAddr());
		this->SetUnsafeForFastReturns(true, INDIRECTLY_CALLED);
	}
	if (this->PossibleTailCallTarget) {
		SMP_msg("INFO: Function at %llx becoming unsafe for fast returns because it is tail call target.\n", (unsigned long long) this->GetFirstFuncAddr());
		this->SetUnsafeForFastReturns(true, TAIL_CALL_TARGET);
	}
	if (HasNoCallers) {
		SMP_msg("INFO: Function at %llx becoming unsafe for fast returns because it has no callers.\n", (unsigned long long) this->GetFirstFuncAddr());
		this->SetUnsafeForFastReturns(true, NO_CALLERS);
	}
	if (MakesTailCall) {
		SMP_msg("INFO: Function at %llx becoming unsafe for fast returns because it makes a tail call.\n", (unsigned long long) this->GetFirstFuncAddr());
		this->SetUnsafeForFastReturns(true, MAKES_TAIL_CALL);
	}
	if (this->MultipleEntryPoints) {
		SMP_msg("INFO: Function at %llx becoming unsafe for fast returns because it has multiple entry points.\n", (unsigned long long) this->GetFirstFuncAddr());
		this->SetUnsafeForFastReturns(true, MULTIPLE_ENTRY_POINTS);
	}
	if (this->HasUnresolvedIndirectJumps()) {
		SMP_msg("INFO: Function at %llx becoming unsafe for fast returns because it has unresolved indirect jumps.\n", (unsigned long long) this->GetFirstFuncAddr());
		this->SetUnsafeForFastReturns(true, UNRESOLVED_INDIR_JUMP);
	}

	if (UnsafeReturnAddr) {
#if STARS_CONSERVATIVE_FAST_RETURNS
		SMP_msg("INFO: Function at %llx becoming unsafe for fast returns because its return addr is UNSAFE.\n", (unsigned long long) this->GetFirstFuncAddr());
		this->SetUnsafeForFastReturns(true, UNSAFE_RETURN_ADDRESS);
#endif

		this->SetReturnAddressStatus(FUNC_UNSAFE);
#if SMP_DEBUG_FUNC_SAFETY
		SMP_msg("UNSAFE function %s ", this->GetFuncName());
		SMP_msg("StackPtrCopy: %d StackPtrPush: %d IndirectGlobal: %d ",
			HasStackPointerCopy, HasStackPointerPush, HasIndirectGlobalWrite);
		SMP_msg("WritesAboveFrame: %d AccessesRA: %d IndirectStack: %d IndirectWrite: %d ",
			WritesAboveLocalFrame, AccessesReturnAddress, HasIndexedStackWrite, this->HasUnsafeIndirectWrites);
		SMP_msg("AnalyzedSP: %d UnresolvedCalls: %d UnresolvedJumps: %d SharedChunks: %d IsLeaf: %d ",
			this->AnalyzedSP, this->HasUnresolvedIndirectCalls(), this->HasUnresolvedIndirectJumps(),
			this->HasSharedChunks(), this->IsLeaf());
		SMP_msg("IndirCallTarget: %d TailCallTarget: %d HasNoCallers: %d MultiEntry: %d\n", 
			this->PossibleIndirectCallTarget, this->PossibleTailCallTarget, HasNoCallers, this->MultipleEntryPoints);
#endif
	}
	else if (HasCallTargets) {
		this->SetReturnAddressStatus(FUNC_SAFE_IF_CALLEES_ARE_SAFE);
	}

#if SMP_DEBUG_FUNC
	if (this->GetReturnAddressStatus() == FUNC_SAFE)
		SMP_msg("Function %s is SAFE\n", GetFuncName());
	else if (this->GetReturnAddressStatus() == FUNC_UNSAFE)
		SMP_msg("Function %s is UNSAFE\n", GetFuncName());
	else if (this->GetReturnAddressStatus() == FUNC_SAFE_IF_CALLEES_ARE_SAFE)
		SMP_msg("Function %s is SAFE_IF_CALLEES_ARE_SAFE\n", GetFuncName());

	if (!Unsafe) 
		SMP_msg("Function %s is mmSAFE\n", GetFuncName());
	else 
		SMP_msg("Function %s is mmUNSAFE\n", GetFuncName());

	if (!SpecUnsafe) 
		SMP_msg("Function %s is Speculatively mmSAFE\n", GetFuncName());
	else 
		SMP_msg("Function %s is Speculatively mmUNSAFE\n", GetFuncName());

#endif
	return;
} // end of SMPFunction::MarkFunctionSafe()

// Perform look-ahead steps needed before translation to SPARK Ada.
// Two tasks: (1) record the names of library functions called by this function;
//  (2) for each loop, record which stack frame bytes are written by direct stack
//  writes inside blocks of that loop (used later for loop memory-range contracts).
void SMPFunction::PreProcessForSPARKAdaTranslation(void) {
	bool HasLoops = (this->LoopCount > 0);

	for (vector<SMPBasicBlock *>::iterator BlockIter = this->RPOBlocks.begin(); BlockIter != this->RPOBlocks.end(); ++BlockIter) {
		SMPBasicBlock *CurrBlock = (*BlockIter);
		if (CurrBlock->HasCallInstruction()) {
			for (vector<SMPInstr *>::iterator InstIter = CurrBlock->GetFirstInst(); InstIter != CurrBlock->GetLastInst(); ++InstIter) {
				SMPInstr *CurrInst = (*InstIter);
				if (CurrInst->GetDataFlowType() == CALL) {
					string CalledFuncName = CurrInst->GetTrimmedCalledFunctionName();
					if (IsLibFuncName(CalledFuncName)) {
						// Record the library func name; duplicate inserts are absorbed by the set.
						(void) this->Program->InsertLibraryFuncName(CalledFuncName);
						// !!!!****!!!! Need to find setuid() arg source (e.g. RDI comes from [RBP-xx]) and save it for Ada pragma assert
						//  and pragma Loop_Invariants.
					}
				}
			}
		}
		size_t BlockNum = (size_t) CurrBlock->GetNumber();
		if (HasLoops && CurrBlock->HasMemoryWrite() && this->IsBlockInAnyLoop((int) BlockNum)) {
			for (vector<SMPInstr *>::iterator InstIter = CurrBlock->GetFirstInst(); InstIter != CurrBlock->GetLastInst(); ++InstIter) {
				SMPInstr *CurrInst = (*InstIter);
				STARSOpndTypePtr MemDefOp = CurrInst->GetMemDef();
				STARS_ea_t offset;
				size_t DataSize;
				bool UsedFramePointer, IndexedAccess, SignedMove, UnsignedMove;
				if ((nullptr != MemDefOp) && this->MDGetStackOffsetAndSize(CurrInst, MemDefOp, this->MinStackAccessOffset, offset, DataSize, UsedFramePointer,
					IndexedAccess, SignedMove, UnsignedMove)) {
					// NOTE: Deal with indexed accesses later.
					int SignedOffset = (int) offset;
					assert(0 <= SignedOffset);  // before adding min stack delta, which is negative
					SignedOffset += this->MinStackAccessOffset;

					for (size_t j = 0; j < DataSize; ++j) { // offset has zero-based index into negative offset vectors
						for (size_t LoopIndex = 0; LoopIndex < (size_t) this->LoopCount; ++LoopIndex) {
							if (this->FuncLoopsByBlock[BlockNum].GetBit(LoopIndex)) {
								if (SignedOffset < 0)
									this->NegativeOffsetStackBytesWrittenByLoop[LoopIndex].SetBit(offset + j);
								else
									this->PositiveOffsetStackBytesWrittenByLoop[LoopIndex].SetBit(SignedOffset + j);
							}
						} // end for all loops
					} // end for all bytes in DataSize of current stack write
				}
			} // end for all instructions in current block
		}
	} // end for all blocks

	return;
} // end of SMPFunction::PreProcessForSPARKAdaTranslation()

// update {Nega, Posi}tiveStackBytesWrittenByLoop[LoopNum]
// Record that stack bytes [FinalStackPtrOffset, FinalStackPtrOffset + MemWidth) are
//  written by loop number LoopNum, updating the per-loop byte-granularity bit vectors
//  {Nega,Posi}tiveOffsetStackBytesWrittenByLoop[LoopNum].
// FinalStackPtrOffset: signed stack offset of the write (negative => below the incoming
//  stack pointer, tracked in the negative-offset vector after re-basing).
// MemWidth: width of the write in bytes (a width of zero records nothing).
void SMPFunction::UpdateStackBytesWrittenByLoop(STARS_sval_t FinalStackPtrOffset, size_t MemWidth, size_t LoopNum) {
	assert(LoopNum < this->GetNumLoops());
	// Negative offsets are re-based by MinStackAccessOffset (which is <= 0) to produce
	//  a zero-based index into the negative-offset bit vector; non-negative offsets
	//  index the positive-offset bit vector directly.
	STARS_sval_t SignedOffset = FinalStackPtrOffset - this->MinStackAccessOffset;
	bool NegativeOffset = (FinalStackPtrOffset < 0);
	// Select the target bit vector once instead of re-testing the sign on every byte.
	auto &WrittenBits = NegativeOffset ? this->NegativeOffsetStackBytesWrittenByLoop[LoopNum]
		: this->PositiveOffsetStackBytesWrittenByLoop[LoopNum];
	size_t ArrayIndex = NegativeOffset ? ((size_t) SignedOffset) : ((size_t) FinalStackPtrOffset);
	// Bounds-check the entire write, not just its first byte, so that a multi-byte
	//  write cannot set bits past the end of the bit vector.
	assert(ArrayIndex < WrittenBits.GetNumBits());
	if (0 < MemWidth) {
		assert((ArrayIndex + MemWidth - 1) < WrittenBits.GetNumBits());
	}
	for (size_t j = 0; j < MemWidth; ++j) { // one bit per byte written
		WrittenBits.SetBit(ArrayIndex + j);
	}
	return;
} // end of SMPFunction::UpdateStackBytesWrittenByLoop()

#if 0
// specify mem ranges that loop changes; return true if anything is printed
// NOTE(doc): Superseded implementation, compiled out via the enclosing "#if 0";
//  the live version is in the "#else" branch below. Retained for reference only.
//
// Emit a SPARK Ada "Post =>" contract into HeaderFile stating that every memory
//  address outside the ranges written by the loop headed at LoopAddr keeps its
//  'Old value, and emit nearly identical pragma Loop_Invariant text into BodyFile.
// PostPrintStarted: true if the caller already began the "Post =>" section.
// Returns true if any post-condition text was printed (NeedsOutput).
bool SMPFunction::EmitSPARKLoopMemRangePostCondition(FILE *HeaderFile, FILE *BodyFile, STARS_ea_t LoopAddr, bool PostPrintStarted) {
	// Map the loop header instruction address to its block number, then loop number.
	SMPBasicBlock *LoopHeaderBlock = this->GetBlockFromInstAddr(LoopAddr);
	int HeaderBlockNum = LoopHeaderBlock->GetNumber();
	int LoopNum = this->GetLoopNumFromHeaderBlockNum(HeaderBlockNum);
	assert(0 <= LoopNum);
	size_t LoopIndex = (size_t) LoopNum;
	size_t LoopNumPlusOne = LoopIndex + 1;
	STARS_sval_t IncomingStackDelta = (*LoopHeaderBlock->GetFirstInst())->GetStackPtrOffset();
	assert(0 >= IncomingStackDelta);
	IncomingStackDelta = (0 - IncomingStackDelta); // convert to positive offset to correct callee mem exprs
	// Classify the kinds of memory writes this loop performs.
	bool StackWritten = (this->NegativeOffsetStackBytesWrittenByLoop[LoopIndex].IsAnyBitSet() || this->PositiveOffsetStackBytesWrittenByLoop[LoopIndex].IsAnyBitSet());
	bool IndirectStackWrites = this->LoopMemExprsExpandToStackOffsets[LoopIndex];
	bool CalleeWrites = this->LoopHasCalleeMemWrites[LoopIndex];
	bool HasArgs = this->DoesLoopHaveArgs(LoopNum);
	StackWritten = (StackWritten || IndirectStackWrites);
	bool LoopInvariantWrites = (0 < this->GetNumInArgsUsedInMemWrites(LoopNumPlusOne));

	// Emit memory writing range post-conditions and nearly identical loop invariants.
	bool NeedsOutput = (StackWritten || HasArgs || CalleeWrites || LoopInvariantWrites);
	if (NeedsOutput) {
		if (!PostPrintStarted) {
			SMP_fprintf(HeaderFile, "\tPost => ");
		}
		// Stack frame writes: emitted via the byte-granularity stack range helper.
		if (StackWritten) {
			this->EmitSPARKStackMemRangePostCondition(HeaderFile, LoopIndex, LoopAddr, PostPrintStarted);
		}
		// Writes through InArg-derived pointers: excluded range is [lower, upper) bounds exprs.
		if (HasArgs) {
			if (!StackWritten) {
				if (PostPrintStarted) {
					SMP_fprintf(HeaderFile, "\n\tand (for all i in Unsigned64 => (if not X86.InMemoryRange(i, ");
				}
				else { // nothing written yet since "Post => "
					SMP_fprintf(HeaderFile, "(for all i in Unsigned64 => (if not X86.InMemoryRange(i, ");
				}
			}
			else {
				SMP_fprintf(HeaderFile, "\n\t\tand not X86.InMemoryRange(i, ");
			}
			this->LoopMemWriteLowerBoundsExprs[LoopIndex]->EmitSPARKAda(HeaderFile, true, false, false, HasArgs, false);
			SMP_fprintf(HeaderFile, ", ");
			this->LoopMemWriteUpperBoundsExprs[LoopIndex]->EmitSPARKAda(HeaderFile, true, false, false, HasArgs, false);
			SMP_fprintf(HeaderFile, ") ");
		}
		// Writes performed by callees invoked from within the loop.
		if (CalleeWrites) {
			size_t OutputCount;
			if (!(StackWritten || HasArgs)) {
				OutputCount = 0;
			}
			else {
				OutputCount = 1;
			}
			for (size_t i = 0; i < this->MemAddrExprWidthsFromCallees[LoopNumPlusOne].size(); ++i) {
				STARSExpression *CurrExpr = (*(this->MemAddrExprWidthsFromCallees[LoopNumPlusOne][i].first));
				// The StackPtrCase below is now handled via the {Posi,Nega}tiveStackOffsetWritesByLoop[] vectors
				if (!CurrExpr->IsStackPtrPlusOffset()) { // screen out stack frame writes
					size_t ByteWidth = this->MemAddrExprWidthsFromCallees[LoopNumPlusOne][i].second;
					if (OutputCount == 0) {
						if (PostPrintStarted) {
							SMP_fprintf(HeaderFile, "\n\tand (for all i in Unsigned64 => (if not X86.InRange64(i, ");
						}
						else { // nothing written since "Post => "
							SMP_fprintf(HeaderFile, "(for all i in Unsigned64 => (if not X86.InRange64(i, ");
						}
					}
					else {
						SMP_fprintf(HeaderFile, "\n\t\tand not X86.InRange64(i, ");
					}
					// Some MemAddrExprs were based on the incoming stack pointer value for the function enclosing
					//  this loop. We need to make two adjustments when printing these exprs:
					//   1. Don't treat the incoming stack ptr reg as an InArg (i.e. don't make it look like an Ada argument).
					//   2. Offset the stack address with the negated incoming stack offset.
					bool StackPtrCase = CurrExpr->GetConstLeftOperand()->MatchesReg(STARS_x86_R_sp);
					STARS_ea_t PreLoopDefAddr = CurrExpr->GetLeftPreLoopDefAddr();
					bool PseudoAddr = STARS_IsSSAMarkerPseudoID(PreLoopDefAddr) || STARS_IsBlockNumPseudoID(PreLoopDefAddr) || STARS_IsLiveInPseudoID(PreLoopDefAddr);
					if (!StackPtrCase && (!PseudoAddr)) {
						// We don't want to trace callee mem exprs all the way back to this function's InArgs.
						//  We only want to trace back to the value that comes into the loop. The loop boundary
						//  crossing is recorded in the expression.
						STARSExpression *LoopBoundaryExpr = CurrExpr->Clone();
						LoopBoundaryExpr->SetLeftOperand(LoopBoundaryExpr->FindLeftPreLoopDefOp());
						LoopBoundaryExpr->SetLeftUseAddr(LoopBoundaryExpr->GetLeftPreLoopDefAddr());
						LoopBoundaryExpr->EmitSPARKAda(HeaderFile, !StackPtrCase, false, false, HasArgs);
					}
					else {
						CurrExpr->EmitSPARKAda(HeaderFile, !StackPtrCase, false, false, HasArgs);
						if ((0 != IncomingStackDelta) && StackPtrCase) {
							// Traced an outarg to a callee back to a stack pointer address. Offset by IncomingStackDelta.
							SMP_fprintf(HeaderFile, " + 16#%x#", IncomingStackDelta);
						}
					}
					SMP_fprintf(HeaderFile, ", %u) ", ByteWidth);
					++OutputCount;
				}
			} // end for all elements of LoopMemAddrExprWidthsFromCallees[LoopIndex]
			SMP_fprintf(HeaderFile, "\n\t\tand X86.InSafeRegion64(i, X86.RSP - 8)"); // only for CalleeWrites case
		} // end if CalleeWrites
		// Writes through InArg expressions recorded for this loop (mirrors CalleeWrites structure).
		if (LoopInvariantWrites) {
			size_t OutputCount;
			if (!(StackWritten || HasArgs)) {
				OutputCount = 0;
			}
			else {
				OutputCount = 1;
			}
			for (size_t i = 0; i < this->GetNumInArgsUsedInMemWrites(LoopNumPlusOne); ++i) {
				STARSExprSetIter CurrExprIter = this->GetInArgExprUsedInMemWrite(LoopNumPlusOne, i);
				STARSExpression *CurrExpr = (*CurrExprIter);
				// The StackPtrCase below is now handled via the {Posi,Nega}tiveStackOffsetWritesByLoop[] vectors
				if (!CurrExpr->IsStackPtrPlusOffset()) { // screen out stack frame writes
					size_t ByteWidth = this->GetInArgMemWriteWidth(LoopNumPlusOne, i);
					if (OutputCount == 0) {
						if (PostPrintStarted) {
							SMP_fprintf(HeaderFile, "\n\tand (for all i in Unsigned64 => (if not X86.InRange64(i, ");
						}
						else { // nothing written since "Post => "
							SMP_fprintf(HeaderFile, "(for all i in Unsigned64 => (if not X86.InRange64(i, ");
						}
					}
					else {
						SMP_fprintf(HeaderFile, "\n\t\tand not X86.InRange64(i, ");
					}
					// Some MemAddrExprs were based on the incoming stack pointer value for the function enclosing
					//  this loop. We need to make two adjustments when printing these exprs:
					//   1. Don't treat the incoming stack ptr reg as an InArg (i.e. don't make it look like an Ada argument).
					//   2. Offset the stack address with the negated incoming stack offset.
					bool StackPtrCase = CurrExpr->GetConstLeftOperand()->MatchesReg(STARS_x86_R_sp);
					STARS_ea_t PreLoopDefAddr = CurrExpr->GetLeftPreLoopDefAddr();
					bool PseudoAddr = STARS_IsSSAMarkerPseudoID(PreLoopDefAddr) || STARS_IsBlockNumPseudoID(PreLoopDefAddr) || STARS_IsLiveInPseudoID(PreLoopDefAddr);
					if (!StackPtrCase && (!PseudoAddr)) {
						// We don't want to trace callee mem exprs all the way back to this function's InArgs.
						//  We only want to trace back to the value that comes into the loop. The loop boundary
						//  crossing is recorded in the expression.
						STARSExpression *LoopBoundaryExpr = CurrExpr->Clone();
						LoopBoundaryExpr->SetLeftOperand(LoopBoundaryExpr->FindLeftPreLoopDefOp());
						LoopBoundaryExpr->SetLeftUseAddr(LoopBoundaryExpr->GetLeftPreLoopDefAddr());
						LoopBoundaryExpr->EmitSPARKAda(HeaderFile, !StackPtrCase, false, false, HasArgs);
					}
					else {
						CurrExpr->EmitSPARKAda(HeaderFile, !StackPtrCase, false, false, HasArgs);
						if ((0 != IncomingStackDelta) && StackPtrCase) {
							// Traced an outarg to a callee back to a stack pointer address. Offset by IncomingStackDelta.
							SMP_fprintf(HeaderFile, " + 16#%x#", IncomingStackDelta);
						}
					}
					SMP_fprintf(HeaderFile, ", %u) ", ByteWidth);
					++OutputCount;
				}
			} // end for all elements of GetNumInArgsUsedInMemWrites(LoopNumPlusOne)
			SMP_fprintf(HeaderFile, "\n\t\tand X86.InSafeRegion64(i, X86.RSP - 8)"); // also emitted for the LoopInvariantWrites case
		} // end if LoopInvariantWrites

		// Close the "for all i" quantifier: untouched addresses keep their 'Old value.
		SMP_fprintf(HeaderFile, " then\n\t\t(X86.Memory(i) = X86.Memory'Old(i)))");

		// Now emit the matching pragma Loop_Invariant text into the body file,
		//  using 'Loop_Entry instead of 'Old.
		PrintSPARKIndentTabs(BodyFile);
		if (StackWritten) {
			SMP_fprintf(BodyFile, "pragma Loop_Invariant(for all i in Unsigned64 => (if ");
			this->EmitSPARKStackMemRangePostCondition(BodyFile, LoopIndex, LoopAddr, false);
		}
		if (HasArgs) {
			if (!StackWritten) {
				SMP_fprintf(BodyFile, "pragma Loop_Invariant(for all i in Unsigned64 => (if not X86.InMemoryRange(i, ");
			}
			else {
				SMP_fprintf(BodyFile, "\n\t\tand not X86.InMemoryRange(i, ");
			}
			this->LoopMemWriteLowerBoundsExprs[LoopIndex]->EmitSPARKAda(BodyFile, true, false, false, HasArgs);
			SMP_fprintf(BodyFile, ", ");
			this->LoopMemWriteUpperBoundsExprs[LoopIndex]->EmitSPARKAda(BodyFile, true, false, false, HasArgs);
			SMP_fprintf(BodyFile, ") ");
		}

		if (CalleeWrites) {
			size_t OutputCount;
			if (!(StackWritten || HasArgs)) {
				OutputCount = 0;
			}
			else {
				OutputCount = 1;
			}
			for (size_t i = 0; i < this->MemAddrExprWidthsFromCallees[LoopNumPlusOne].size(); ++i) {
				size_t ByteWidth = this->MemAddrExprWidthsFromCallees[LoopNumPlusOne][i].second;
				STARSExpression *CurrExpr = (*(this->MemAddrExprWidthsFromCallees[LoopNumPlusOne][i].first));
				if (!CurrExpr->IsStackPtrPlusOffset()) { // screen out stack frame writes
					if (OutputCount == 0) {
						SMP_fprintf(BodyFile, "pragma Loop_Invariant(for all i in Unsigned64 => (if not X86.InRange64(i, ");
					}
					else {
						SMP_fprintf(BodyFile, "\n\t\tand not X86.InRange64(i, ");
					}
					// Some MemAddrExprs were based on the incoming stack pointer value for the function enclosing
					//  this loop. We need to make two adjustments when printing these exprs:
					//   1. Don't treat the incoming stack ptr reg as an InArg (i.e. don't make it look like an Ada argument).
					//   2. Offset the stack address with the negated incoming stack offset.
					bool StackPtrCase = CurrExpr->GetConstLeftOperand()->MatchesReg(STARS_x86_R_sp);
					STARS_ea_t PreLoopDefAddr = CurrExpr->GetLeftPreLoopDefAddr();
					bool PseudoAddr = STARS_IsSSAMarkerPseudoID(PreLoopDefAddr) || STARS_IsBlockNumPseudoID(PreLoopDefAddr) || STARS_IsLiveInPseudoID(PreLoopDefAddr);
					if (!StackPtrCase && (!PseudoAddr)) {
						// We don't want to trace callee mem exprs all the way back to this function's InArgs.
						//  We only want to trace back to the value that comes into the loop. The loop boundary
						//  crossing is recorded in the expression.
						STARSExpression *LoopBoundaryExpr = CurrExpr->Clone();
						LoopBoundaryExpr->SetLeftOperand(LoopBoundaryExpr->FindLeftPreLoopDefOp());
						LoopBoundaryExpr->SetLeftUseAddr(LoopBoundaryExpr->GetLeftPreLoopDefAddr());
						LoopBoundaryExpr->EmitSPARKAda(BodyFile, !StackPtrCase, false, false, HasArgs);
					}
					else {
						CurrExpr->EmitSPARKAda(BodyFile, !StackPtrCase, false, false, HasArgs);
						if ((0 != IncomingStackDelta) && StackPtrCase) {
							// Traced an outarg to a callee back to a stack pointer address. Offset by IncomingStackDelta.
							SMP_fprintf(BodyFile, " + 16#%x#", IncomingStackDelta);
						}
					}
					SMP_fprintf(BodyFile, ", %u) ", ByteWidth);
					++OutputCount;
				}
			} // end for all elements of MemAddrExprWidthsFromCallees[LoopIndex]
			SMP_fprintf(BodyFile, "\n\t\tand X86.InSafeRegion64(i, X86.RSP - 8)"); // only for CalleeWrites case
		} // end if CalleeWrites

		if (LoopInvariantWrites) {
			size_t OutputCount;
			if (!(StackWritten || HasArgs)) {
				OutputCount = 0;
			}
			else {
				OutputCount = 1;
			}
			for (size_t i = 0; i < this->GetNumInArgsUsedInMemWrites(LoopNumPlusOne); ++i) {
				STARSExprSetIter CurrExprIter = this->GetInArgExprUsedInMemWrite(LoopNumPlusOne, i);
				STARSExpression *CurrExpr = (*CurrExprIter);
				if (!CurrExpr->IsStackPtrPlusOffset()) { // screen out stack frame writes
					size_t ByteWidth = this->GetInArgMemWriteWidth(LoopNumPlusOne, i);
					if (OutputCount == 0) {
						SMP_fprintf(BodyFile, "pragma Loop_Invariant(for all i in Unsigned64 => (if not X86.InRange64(i, ");
					}
					else {
						SMP_fprintf(BodyFile, "\n\t\tand not X86.InRange64(i, ");
					}
					// Some MemAddrExprs were based on the incoming stack pointer value for the function enclosing
					//  this loop. We need to make two adjustments when printing these exprs:
					//   1. Don't treat the incoming stack ptr reg as an InArg (i.e. don't make it look like an Ada argument).
					//   2. Offset the stack address with the negated incoming stack offset.
					bool StackPtrCase = CurrExpr->GetConstLeftOperand()->MatchesReg(STARS_x86_R_sp);
					STARS_ea_t PreLoopDefAddr = CurrExpr->GetLeftPreLoopDefAddr();
					bool PseudoAddr = STARS_IsSSAMarkerPseudoID(PreLoopDefAddr) || STARS_IsBlockNumPseudoID(PreLoopDefAddr) || STARS_IsLiveInPseudoID(PreLoopDefAddr);
					if (!StackPtrCase && (!PseudoAddr)) {
						// We don't want to trace callee mem exprs all the way back to this function's InArgs.
						//  We only want to trace back to the value that comes into the loop. The loop boundary
						//  crossing is recorded in the expression.
						STARSExpression *LoopBoundaryExpr = CurrExpr->Clone();
						LoopBoundaryExpr->SetLeftOperand(LoopBoundaryExpr->FindLeftPreLoopDefOp());
						LoopBoundaryExpr->SetLeftUseAddr(LoopBoundaryExpr->GetLeftPreLoopDefAddr());
						LoopBoundaryExpr->EmitSPARKAda(BodyFile, !StackPtrCase, false, false, HasArgs);
					}
					else {
						CurrExpr->EmitSPARKAda(BodyFile, !StackPtrCase, false, false, HasArgs);
						if ((0 != IncomingStackDelta) && StackPtrCase) {
							// Traced an outarg to a callee back to a stack pointer address. Offset by IncomingStackDelta.
							SMP_fprintf(BodyFile, " + 16#%x#", IncomingStackDelta);
						}
					}
					SMP_fprintf(BodyFile, ", %u) ", ByteWidth);
					++OutputCount;
				}
			} // end for all elements of GetNumInArgsUsedInMemWrites(LoopNumPlusOne)
			SMP_fprintf(BodyFile, "\n\t\tand X86.InSafeRegion64(i, X86.RSP - 8)"); // also emitted for the LoopInvariantWrites case
		} // end if LoopInvariantWrites

		SMP_fprintf(BodyFile, " then\n\t\t(X86.Memory(i) = X86.Memory'Loop_Entry(i))));\n");
	}

	if (NeedsOutput) {
		SMP_fprintf(HeaderFile, ");\n\n"); // terminate the postcondition section
	}
	else { // nothing printed
		SMP_fprintf(HeaderFile, ";\n\n"); // terminate the specification
	}

	return NeedsOutput;
} // end of SMPFunction::EmitSPARKLoopMemRangePostCondition()
#else
// specify mem ranges that loop changes; return true if anything is printed
// Emit a SPARK Ada "Post =>" contract into HeaderFile stating that every memory
//  address outside the ranges written by the loop headed at LoopAddr keeps its
//  'Old value, and emit matching pragma Loop_Invariant text (using 'Loop_Entry)
//  into BodyFile. This version aggregates exprs into the Temp{Range,NonRange}
//  containers (filled by AggregateLoopMemExprs()) to avoid duplicate output.
// PostPrintStarted: true if the caller already began the "Post =>" section.
// Returns true if any post-condition text was printed (NeedsOutput).
bool SMPFunction::EmitSPARKLoopMemRangePostCondition(FILE *HeaderFile, FILE *BodyFile, STARS_ea_t LoopAddr, bool PostPrintStarted) {
	bool UseFP = this->UsesFramePointer();
	// Map the loop header instruction address to its block number, then loop number.
	SMPBasicBlock *LoopHeaderBlock = this->GetBlockFromInstAddr(LoopAddr);
	int HeaderBlockNum = LoopHeaderBlock->GetNumber();
	int LoopNum = this->GetLoopNumFromHeaderBlockNum(HeaderBlockNum);
	assert(0 <= LoopNum);
	size_t LoopIndex = (size_t) LoopNum;
	size_t LoopNumPlusOne = LoopIndex + 1;
	STARS_sval_t IncomingStackDelta = (*LoopHeaderBlock->GetFirstInst())->GetStackPtrOffset();
	assert(0 >= IncomingStackDelta);
	IncomingStackDelta = (0 - IncomingStackDelta); // convert to positive offset to correct callee mem exprs
	SMPInstr *FirstInst = this->GetInstFromAddr(LoopAddr);
	// Classify the kinds of memory writes this loop performs.
	bool StackWritten = (this->NegativeOffsetStackBytesWrittenByLoop[LoopIndex].IsAnyBitSet() || this->PositiveOffsetStackBytesWrittenByLoop[LoopIndex].IsAnyBitSet());
	bool IndirectStackWrites = this->LoopMemExprsExpandToStackOffsets[LoopIndex];
	bool CalleeWrites = this->LoopHasCalleeMemWrites[LoopIndex];
	bool HasArgs = this->DoesLoopHaveArgs(LoopNum);
	StackWritten = (StackWritten || IndirectStackWrites);
	bool LoopInvariantWrites = (0 < this->GetNumInArgsUsedInMemWrites(LoopNumPlusOne));
	bool HasBIVInitialValue = (this->LoopAnalyzedBIVIters[LoopNum] != this->LoopInductionVars[LoopNum].end()); // successful loop analysis

	// Use sets of exprs to avoid duplication and streamline the output section.
	bool HasRangeExprs = (!this->TempRangeExprWidthIters.empty());
	bool HasNonRangeExprs = (!this->TempNonRangeExprWidthIters.empty());
	bool HasRelationalExprs = (!this->RelationalMemWriteWidths[LoopNumPlusOne].empty());

	// Besides memory writing contracts, another pragma Loop_Invariant that has a corresponding post-condition is to assert
	//  that a register is loop-invariant if it is NOT in the OutputRegs bitset for this loop,
	//  and it is in the CalleePreservedRegsByLoop bitset for this loop. Find all such registers.
	bool InvariantPreservedRegs = false;
	std::bitset<1 + MD_LAST_REG_NO> LoopPreservedRegs;
	// NOTE(review): this loop bound excludes RegNo == MD_LAST_REG_NO even though
	//  LoopPreservedRegs has (1 + MD_LAST_REG_NO) bits — confirm intentional.
	for (size_t RegNo = 0; RegNo < MD_LAST_REG_NO; ++RegNo) {
		if (!this->OutputRegsByLoop[LoopIndex][RegNo] && this->CalleePreservedRegsByLoop[LoopIndex][RegNo]) {
			// The fact that the register is preserved by callees will force it to be In_Out. However, the
			//  absence of an OutputRegs bit means that the most recent thing to happen to it in the call chain
			//  was its restoration by a function in the callee chain. It is, therefore, in the confusing
			//  category of "loop-invariant but declared Global In_Out." We need to tell the prover that it is
			//  loop-invariant, and also emit a post-condition that helps propagate this info to callers.
			InvariantPreservedRegs = true;
			LoopPreservedRegs.set(RegNo);
		}
	}

	// Emit memory writing range post-conditions and nearly identical loop invariants.
	bool NeedsMemoryOutput = (StackWritten || HasArgs || HasRangeExprs || HasNonRangeExprs || HasRelationalExprs);
	bool NeedsOutput = (NeedsMemoryOutput || InvariantPreservedRegs);
	bool LoopRangeWritten = false;
	bool LocalPostPrintStarted = PostPrintStarted;
	if (NeedsOutput) {
		size_t OutputCount = 0;
		if (!LocalPostPrintStarted) {
			if (this->LoopHasPreconditions[LoopIndex]) {
				SMP_fprintf(HeaderFile, "\tPost => ");
			}
			else {
				// No precondition output; need to put comma and new line after Global output section.
				SMP_fprintf(HeaderFile, ",\n\tPost => ");
			}
		}
		// Post-conditions for registers that are loop-invariant despite being Global In_Out.
		if (InvariantPreservedRegs) {
			for (size_t RegNo = 0; RegNo < LoopPreservedRegs.size(); ++RegNo) {
				if (LoopPreservedRegs[RegNo]) {
					if (!LocalPostPrintStarted) {
						LocalPostPrintStarted = true;
					}
					else {
						SMP_fprintf(HeaderFile, "\n\tand (");
					}
					STARSOpndTypePtr RegOp = FirstInst->MakeRegOpnd((STARS_regnum_t) RegNo);
					FirstInst->PrintSPARKAdaOperand(RegOp, HeaderFile, false, UseFP, true, false, false);
					SMP_fprintf(HeaderFile, "= ");
					FirstInst->PrintSPARKAdaOperand(RegOp, HeaderFile, false, UseFP, true, true, false);
					SMP_fprintf(HeaderFile, "'Old) ");
				}
			}
		}

		// Stack frame writes: emitted via the byte-granularity stack range helper.
		// NOTE(review): passes PostPrintStarted rather than LocalPostPrintStarted,
		//  even after the preserved-regs section may have printed — confirm intentional.
		if (StackWritten) {
			this->EmitSPARKStackMemRangePostCondition(HeaderFile, LoopIndex, LoopAddr, PostPrintStarted);
		}
		// Aggregated lower/upper bound range exprs for non-stack writes.
		if (HasRangeExprs) {
			if (!StackWritten) {
				if (PostPrintStarted) {
					SMP_fprintf(HeaderFile, "\n\tand (for all i in Unsigned64 => (if not X86.InMemoryRange(i, ");
				}
				else { // nothing written yet since "Post => " except maybe preserved regs
					SMP_fprintf(HeaderFile, "(for all i in Unsigned64 => (if not X86.InMemoryRange(i, ");
				}
			}
			else {
				SMP_fprintf(HeaderFile, "\n\t\tand not X86.InMemoryRange(i, ");
			}
			size_t VectorLimit = this->TempRangeExprWidthIters.size();
			for (size_t VectorIndex = 0; VectorIndex < VectorLimit; ++VectorIndex) {
				STARSExpression *LowerExpr = (*this->TempRangeExprWidthIters[VectorIndex].first);
				STARSExpression *UpperExpr = (*this->TempRangeExprWidthIters[VectorIndex].second);
				this->EmitSPARKMemRange(HeaderFile, false, true, HasArgs, LowerExpr, UpperExpr, OutputCount, 0);
			}
		}

		// Relational (lower-bound/upper-bound pair) exprs recorded with explicit widths.
		if (HasRelationalExprs) {
			if (0 == OutputCount) {
				if (!StackWritten) {
					if (PostPrintStarted) {
						SMP_fprintf(HeaderFile, "\n\tand (for all i in Unsigned64 => (if not X86.InMemoryRange(i, ");
					}
					else { // nothing written yet since "Post => "
						SMP_fprintf(HeaderFile, "(for all i in Unsigned64 => (if not X86.InMemoryRange(i, ");
					}
				}
				else {
					SMP_fprintf(HeaderFile, "\n\t\tand not X86.InMemoryRange(i, ");
				}
			}
			std::list<std::pair<std::size_t, std::pair<STARSExprSetIter, STARSExprSetIter> > >::const_iterator RelationalIter;
			for (RelationalIter = this->RelationalMemWriteWidths[LoopNumPlusOne].cbegin(); RelationalIter != this->RelationalMemWriteWidths[LoopNumPlusOne].cend(); ++RelationalIter) {
				size_t MemWidth = (*RelationalIter).first;
				STARSExprSetIter LowerExprIter = (*RelationalIter).second.first;
				STARSExprSetIter UpperExprIter = (*RelationalIter).second.second;
				this->EmitSPARKMemRange(HeaderFile, false, true, HasArgs, (*LowerExprIter), (*UpperExprIter), OutputCount, MemWidth);
			}
		} // end if (HasRelationalExprs)

		// Single-address (base + width) exprs, emitted with X86.InRange64.
		if (HasNonRangeExprs) {
			size_t VectorLimit = this->TempNonRangeExprWidthIters.size();
			for (size_t VectorIndex = 0; VectorIndex < VectorLimit; ++VectorIndex) {
				// Easier in this loop to use X86.InRange64(i, X86.RAX, 8) than to use X86.InMemoryRange(i, X86.RAX, X86.RAX+8)
				//  which would require printing the same operand twice.
				size_t ByteWidth = this->TempNonRangeExprWidthIters[VectorIndex].first;
				STARSExpression *NonRangeExpr = (*this->TempNonRangeExprWidthIters[VectorIndex].second);

				// The StackPtrCase below is now handled via the {Posi,Nega}tiveStackOffsetWritesByLoop[] vectors
				if (!NonRangeExpr->IsStackPtrPlusOffset()) { // screen out stack frame writes
					if (OutputCount == 0) {
						if (PostPrintStarted) {
							SMP_fprintf(HeaderFile, "\n\tand (for all i in Unsigned64 => (if not X86.InRange64(i, ");
						}
						else { // nothing written since "Post => "
							SMP_fprintf(HeaderFile, "(for all i in Unsigned64 => (if not X86.InRange64(i, ");
						}
					}
					else {
						SMP_fprintf(HeaderFile, "\n\t\tand not X86.InRange64(i, ");
					}
					// Some MemAddrExprs were based on the incoming stack pointer value for the function enclosing
					//  this loop. We need to make two adjustments when printing these exprs:
					//   1. Don't treat the incoming stack ptr reg as an InArg (i.e. don't make it look like an Ada argument).
					//   2. Offset the stack address with the negated incoming stack offset.
					bool StackPtrCase = (!NonRangeExpr->HasLeftSubTree()) && NonRangeExpr->GetConstLeftOperand()->MatchesReg(STARS_x86_R_sp);
					STARS_ea_t PreLoopDefAddr = NonRangeExpr->GetLeftPreLoopDefAddr();
					bool PseudoAddr = STARS_IsSSAMarkerPseudoID(PreLoopDefAddr) || STARS_IsBlockNumPseudoID(PreLoopDefAddr) || STARS_IsLiveInPseudoID(PreLoopDefAddr);
					if (!StackPtrCase && (!PseudoAddr)) {
						// We don't want to trace callee mem exprs all the way back to this function's InArgs.
						//  We only want to trace back to the value that comes into the loop. The loop boundary
						//  crossing is recorded in the expression.
						STARSExpression *LoopBoundaryExpr = NonRangeExpr->Clone();
						LoopBoundaryExpr->SetLeftOperand(LoopBoundaryExpr->FindLeftPreLoopDefOp());
						LoopBoundaryExpr->SetLeftUseAddr(LoopBoundaryExpr->GetLeftPreLoopDefAddr());
						LoopBoundaryExpr->EmitSPARKAda(HeaderFile, !StackPtrCase, true, false, HasArgs, false);
					}
					else {
						NonRangeExpr->EmitSPARKAda(HeaderFile, !StackPtrCase, true, false, HasArgs, false);
						if ((0 != IncomingStackDelta) && StackPtrCase) {
							// Traced an outarg to a callee back to a stack pointer address. Offset by IncomingStackDelta.
							SMP_fprintf(HeaderFile, " + 16#%x#", IncomingStackDelta);
						}
					}
					SMP_fprintf(HeaderFile, ", %u) ", ByteWidth);
					++OutputCount;
				} // end if (!NonRangeExpr->IsStackPtrPlusOffset())
			} // end for VectorIndex over all NonRangeExprWidthIters
		} // end if (HasNonRangeExprs)

		if (CalleeWrites) {
			SMP_fprintf(HeaderFile, "\n\t\tand X86.InSafeRegion64(i, X86.RSP - 8)"); // only for CalleeWrites case
		} // end if CalleeWrites

		// Close the "for all i" quantifier: untouched addresses keep their 'Old value.
		SMP_fprintf(HeaderFile, " then\n\t\t(X86.Memory(i) = X86.Memory'Old(i)))");

		//   ********** LOOP INVARIANTS ******************
		// Emit similar loop invariants that mimic the post-conditions.
		PrintSPARKIndentTabs(BodyFile);
		OutputCount = 0; // reset for loop invariants
		if (HasBIVInitialValue) { // Need loop invariant for basic induction var modulo value if increment step is not 1
			// NOTE: We could have more than one BIV. Need to expand.
			STARSInductionVarFamilyList::const_iterator BIVIter = this->LoopAnalyzedBIVIters[LoopIndex];
			assert(BIVIter != this->LoopInductionVars[LoopIndex].end());
			STARSOpndTypePtr BIVIncrementOp = BIVIter->BasicInductionVar.Addend.GetOp();
			if (BIVIncrementOp->IsImmedOp()) {
				STARS_sval_t IncrementValue = (STARS_sval_t) BIVIncrementOp->GetImmedValue();
				if (0 > IncrementValue)
					IncrementValue = (0 - IncrementValue); // make it positive by taking absolute value
				if (IncrementValue > 1) { // not unit stride
					// Print "pragma Loop_Invariant(((X86.RAX - X86.RAX'Loop_Entry) mod IncrementValue) = 0);
					STARS_ea_t BIVInitAddr = BIVIter->BIVIncomingDefAddr;
					assert(STARS_BADADDR != BIVInitAddr);
					STARSOpndTypePtr BIVDefOp = BIVIter->BasicInductionVar.InductionVar.GetOp();
					SMPInstr *BIVInitInst = this->GetInstFromAddr(BIVInitAddr);
					assert(nullptr != BIVInitInst);
					SMP_fprintf(BodyFile, "pragma Loop_Invariant(((");
					BIVInitInst->PrintSPARKAdaOperand(BIVDefOp, BodyFile, false, UseFP, true, false, false);
					SMP_fprintf(BodyFile, "- ");
					BIVInitInst->PrintSPARKAdaOperand(BIVDefOp, BodyFile, false, UseFP, true, true, false);
					SMP_fprintf(BodyFile, "'Loop_Entry) mod %d) = 0);\n", IncrementValue);
					PrintSPARKIndentTabs(BodyFile);
					// ++OutputCount; // leave count 0 for mem range exprs below (starting new loop invariants)
				}
			}
		}
		if (StackWritten) {
			SMP_fprintf(BodyFile, "pragma Loop_Invariant(for all i in Unsigned64 => (if ");
			this->EmitSPARKStackMemRangePostCondition(BodyFile, LoopIndex, LoopAddr, false);
			++OutputCount;
		}

		// Shared helper emits the range/relational/non-range invariants into the body.
		this->EmitSPARKMemRangeLoopInvariants(BodyFile, LoopIndex, IncomingStackDelta, true, OutputCount);

		// Another pragma Loop_Invariant that has a corresponding post-condition is to assert
		//  that a register is loop-invariant if it is NOT in the OutputRegs bitset for this loop,
		//  and it is in the CalleePreservedRegsByLoop bitset for this loop.
		for (size_t RegNo = 0; RegNo < LoopPreservedRegs.size(); ++RegNo) {
			if (LoopPreservedRegs[RegNo]) {
				PrintSPARKIndentTabs(BodyFile);
				SMP_fprintf(BodyFile, "pragma Loop_Invariant(");
				STARSOpndTypePtr RegOp = FirstInst->MakeRegOpnd((STARS_regnum_t) RegNo);
				FirstInst->PrintSPARKAdaOperand(RegOp, BodyFile, false, UseFP, true, false, false);
				SMP_fprintf(BodyFile, "= ");
				FirstInst->PrintSPARKAdaOperand(RegOp, BodyFile, false, UseFP, true, true, false);
				SMP_fprintf(BodyFile, "'Loop_Entry);\n");
			}
		}
		if (InvariantPreservedRegs) {
			PrintSPARKIndentTabs(BodyFile);
		}
	} // end if (NeedsOutput)

	if (NeedsOutput) {
		SMP_fprintf(HeaderFile, ");\n\n"); // terminate the postcondition section
	}
	else { // nothing printed
		SMP_fprintf(HeaderFile, ";\n\n"); // terminate the specification
	}

	return NeedsOutput;
} // end of SMPFunction::EmitSPARKLoopMemRangePostCondition()
#endif

// Emit only the mem range Loop_Invariant (or pragma Assert, if LoopInvariant is false)
// Emit only the mem range Loop_Invariant (or pragma Assert, if LoopInvariant is false)
//  into BodyFile for loop LoopIndex, drawing from the Temp{Range,NonRange}ExprWidthIters
//  and RelationalMemWriteWidths containers previously filled for this loop.
// IncomingStackDelta: positive stack offset used to correct exprs based on the
//  incoming stack pointer.
// OutputCount is in/out: number of range terms already printed. When it is 0 the
//  opening "pragma ...(for all i in Unsigned64 => (if not ..." text is emitted here;
//  otherwise terms are joined with "and not". The caller sees the updated count.
void SMPFunction::EmitSPARKMemRangeLoopInvariants(FILE *BodyFile, size_t LoopIndex, STARS_sval_t IncomingStackDelta, bool LoopInvariant, size_t &OutputCount) {
	size_t LoopNumPlusOne = LoopIndex + 1;
	bool HasRangeExprs = (!this->TempRangeExprWidthIters.empty());
	bool HasNonRangeExprs = (!this->TempNonRangeExprWidthIters.empty());
	bool HasRelationalExprs = (!this->RelationalMemWriteWidths[LoopNumPlusOne].empty());
	bool CalleeWrites = this->LoopHasCalleeMemWrites[LoopIndex];
	bool HasArgs = this->DoesLoopHaveArgs((int) LoopIndex);
	// Choose the pragma name once; same body text is used for both contract kinds.
	string ContractString;
	if (LoopInvariant)
		ContractString = "Loop_Invariant";
	else
		ContractString = "Assert";

	// Aggregated lower/upper bound range exprs for non-stack writes.
	if (HasRangeExprs) {
		if (0 == OutputCount) {
			SMP_fprintf(BodyFile, "pragma %s(for all i in Unsigned64 => (if not X86.InMemoryRange(i, ", ContractString.c_str());
		}
		else {
			SMP_fprintf(BodyFile, "\n\t\tand not X86.InMemoryRange(i, ");
		}
		size_t VectorLimit = this->TempRangeExprWidthIters.size();
		for (size_t VectorIndex = 0; VectorIndex < VectorLimit; ++VectorIndex) {
			STARSExpression *LowerExpr = (*this->TempRangeExprWidthIters[VectorIndex].first);
			STARSExpression *UpperExpr = (*this->TempRangeExprWidthIters[VectorIndex].second);
			this->EmitSPARKMemRange(BodyFile, false, true, HasArgs, LowerExpr, UpperExpr, OutputCount, 0);
		}
	}
	// Relational (lower-bound/upper-bound pair) exprs recorded with explicit widths.
	if (HasRelationalExprs) {
		if (0 == OutputCount) {
			SMP_fprintf(BodyFile, "pragma %s(for all i in Unsigned64 => (if not X86.InMemoryRange(i, ", ContractString.c_str());
		}
		else {
			SMP_fprintf(BodyFile, "\n\t\tand not X86.InMemoryRange(i, ");
		}
		std::list<std::pair<std::size_t, std::pair<STARSExprSetIter, STARSExprSetIter> > >::const_iterator RelationalIter;
		for (RelationalIter = this->RelationalMemWriteWidths[LoopNumPlusOne].cbegin(); RelationalIter != this->RelationalMemWriteWidths[LoopNumPlusOne].cend(); ++RelationalIter) {
			size_t MemWidth = (*RelationalIter).first;
			STARSExprSetIter LowerExprIter = (*RelationalIter).second.first;
			STARSExprSetIter UpperExprIter = (*RelationalIter).second.second;
			this->EmitSPARKMemRange(BodyFile, false, true, HasArgs, (*LowerExprIter), (*UpperExprIter), OutputCount, MemWidth);
		}
	}

	// Single-address (base + width) exprs, emitted with X86.InRange64.
	if (HasNonRangeExprs) {
		size_t VectorLimit = this->TempNonRangeExprWidthIters.size();
		for (size_t VectorIndex = 0; VectorIndex < VectorLimit; ++VectorIndex) {
			// Easier in this loop to use X86.InRange64(i, X86.RAX, 8) than to use X86.InMemoryRange(i, X86.RAX, X86.RAX+8)
			//  which would require printing the same operand twice.
			size_t ByteWidth = this->TempNonRangeExprWidthIters[VectorIndex].first;
			STARSExpression *NonRangeExpr = (*this->TempNonRangeExprWidthIters[VectorIndex].second);

			// The StackPtrCase below is now handled via the {Posi,Nega}tiveStackOffsetWritesByLoop[] vectors
			if (!NonRangeExpr->IsStackPtrPlusOffset()) { // screen out stack frame writes
				if (OutputCount == 0) {
					SMP_fprintf(BodyFile, "pragma %s(for all i in Unsigned64 => (if not X86.InRange64(i, ", ContractString.c_str());
				}
				else {
					SMP_fprintf(BodyFile, "\n\t\tand not X86.InRange64(i, ");
				}
				// Some MemAddrExprs were based on the incoming stack pointer value for the function enclosing
				//  this loop. We need to make two adjustments when printing these exprs:
				//   1. Don't treat the incoming stack ptr reg as an InArg (i.e. don't make it look like an Ada argument).
				//   2. Offset the stack address with the negated incoming stack offset.
				bool StackPtrCase = (!NonRangeExpr->HasLeftSubTree()) && NonRangeExpr->GetConstLeftOperand()->MatchesReg(STARS_x86_R_sp);
				STARS_ea_t PreLoopDefAddr = NonRangeExpr->GetLeftPreLoopDefAddr();
				bool PseudoAddr = STARS_IsSSAMarkerPseudoID(PreLoopDefAddr) || STARS_IsBlockNumPseudoID(PreLoopDefAddr) || STARS_IsLiveInPseudoID(PreLoopDefAddr);
				if (!StackPtrCase && (!PseudoAddr)) {
					// We don't want to trace callee mem exprs all the way back to this function's InArgs.
					//  We only want to trace back to the value that comes into the loop. The loop boundary
					//  crossing is recorded in the expression.
					STARSExpression *LoopBoundaryExpr = NonRangeExpr->Clone();
					LoopBoundaryExpr->SetLeftOperand(LoopBoundaryExpr->FindLeftPreLoopDefOp());
					LoopBoundaryExpr->SetLeftUseAddr(LoopBoundaryExpr->GetLeftPreLoopDefAddr());
					LoopBoundaryExpr->EmitSPARKAda(BodyFile, !StackPtrCase, false, false, HasArgs, false);
				}
				else {
					NonRangeExpr->EmitSPARKAda(BodyFile, !StackPtrCase, false, false, HasArgs, false);
					if ((0 != IncomingStackDelta) && StackPtrCase) {
						// Traced an outarg to a callee back to a stack pointer address. Offset by IncomingStackDelta.
						SMP_fprintf(BodyFile, " + 16#%x#", IncomingStackDelta);
					}
				}
				SMP_fprintf(BodyFile, ", %u) ", ByteWidth);
				++OutputCount;
			} // end if (!NonRangeExpr->IsStackPtrPlusOffset())
		} // end for VectorIndex over all NonRangeExprWidthIters
	} // end if (HasNonRangeExprs)

	if (CalleeWrites) {
		SMP_fprintf(BodyFile, "\n\t\tand X86.InSafeRegion64(i, X86.RSP - 8)"); // only for CalleeWrites case
	} // end if CalleeWrites

	// Close the quantifier: untouched addresses keep their 'Loop_Entry value.
	SMP_fprintf(BodyFile, " then\n\t\t(X86.Memory(i) = X86.Memory'Loop_Entry(i))));\n");

	return;
} // SMPFunction::EmitSPARKMemRangeLoopInvariants()

// Aggregate temporary containers of memory-write expressions for one loop.
//  Three sources feed the Temp* containers (which are cleared first):
//   (1) this function's own loop memory-write bound pairs (range exprs),
//   (2) memory address exprs inherited from loops inside callees, and
//   (3) callee memory writes and InArg-based writes recorded for this loop.
//  Relational exprs are split into bounds via SplitAndSaveRelationalExpr();
//  stack-ptr-plus-offset exprs are skipped (local stack frame writes are
//  handled separately).
void SMPFunction::AggregateLoopMemExprs(size_t LoopIndex) {
	size_t LoopNumPlusOne = LoopIndex + 1;
	pair<STARSExprSetIter, bool> InsertResult, InsertResult2;
	// Clear out temp containers that might have info from an enclosing loop.
	this->TempRangeExprWidthIters.clear();
	this->TempNonRangeExprWidthIters.clear();
	this->TempNonRangeExprs.clear();
	this->TempLowerBoundsExprs.clear();
	this->TempUpperBoundsExprs.clear();
	this->RelationalMemWriteWidths[LoopNumPlusOne].clear();
	this->RelationalLowerBoundExprs[LoopNumPlusOne].clear();
	this->RelationalUpperBoundExprs[LoopNumPlusOne].clear();

	// Source (1): this function's own loop memory writes, already paired
	//  into lower/upper bound expressions.
	if (this->NonStackFrameLoopMemWrites[LoopIndex]) {
		STARSExprBoundsIter ExprIter;
		for (ExprIter = this->GetFirstLoopMemWriteExprBoundsIter(LoopIndex); ExprIter != this->GetLastLoopMemWriteExprBoundsIter(LoopIndex); ++ExprIter) {
			STARSExpression *LowerExpr = ExprIter->first;
			STARSExpression *UpperExpr = ExprIter->second;
			if (!IsLocalStackFrameExprPair(LowerExpr, UpperExpr)) { // not just local var write
				InsertResult = this->TempLowerBoundsExprs.insert(LowerExpr);
				InsertResult2 = this->TempUpperBoundsExprs.insert(UpperExpr);
				if (InsertResult.second || InsertResult2.second) {
					// New range, because at least one insert was not a duplicate.
					pair<STARSExprSetIter, STARSExprSetIter> InsertValue(InsertResult.first, InsertResult2.first);
					this->TempRangeExprWidthIters.push_back(InsertValue);
				}
			}
		} // end for all loop memory write exprs
	}

	// Two cases. (1) From outside any loop, we call foo() and it has writes inside a loop.
	//  (2) From inside a loop, we call foo() and it has writes inside a loop.
	// Case (2) is more complex, as we need to analyze the worst-case range of mem addresses
	//  given that we have nested loops, albeit across procedure boundaries.
	// Case (1) is found in index 0 of vector this->LoopMemAddrExprsFromCalleeLoops[] while
	//  case (2) is found at index [LoopIndex + 1] in the same vector.
	list<std::pair<STARSExprSetIter, size_t> >::const_iterator MemExprIter;
	for (MemExprIter = this->LoopMemAddrExprWidthsFromCalleeLoops[LoopNumPlusOne].cbegin(); MemExprIter != this->LoopMemAddrExprWidthsFromCalleeLoops[LoopNumPlusOne].cend(); ++MemExprIter) {
		pair<STARSExprSetIter, size_t> MemExprItem = (*MemExprIter);
		size_t MemWidth = MemExprItem.second;
		STARSExprSetIter ExprSetIter = MemExprItem.first;
		if ((*ExprSetIter)->IsStackPtrPlusOffset())
			continue; // local stack frame write; not tracked here
		if ((*ExprSetIter)->IsRelationalExpr()) {
			// Fixed diagnostic: previously mis-identified the enclosing function
			//  as EmitSPARKProcPrePostMemConditions.
			SMP_msg("INFO: SPARK: Saved LoopMemAddrFromCallees relational expr in AggregateLoopMemExprs for LoopPlusOne: %zu Func: %s\n",
				LoopNumPlusOne, this->GetFuncName());
			// We have already computed the upper bound taking into account the ByteWidth, so pass
			//  a ByteWidth of 0 here to avoid double-adding the ByteWidth in EmitSPARKMemRange().
			this->SplitAndSaveRelationalExpr(true, LoopNumPlusOne, 0, (*ExprSetIter));
			continue;
		}
		// Non-range expr.
		InsertResult = this->TempNonRangeExprs.insert(*ExprSetIter);
		if (InsertResult.second) {
			// Unique new expr.
			pair<size_t, STARSExprSetIter> InsertValue(MemWidth, InsertResult.first);
			this->TempNonRangeExprWidthIters.push_back(InsertValue);
		}
	} // end for MemExprIter from LoopMemAddrExprWidthsFromCalleeLoops

	// Source (3a): memory writes from callees invoked within this loop.
	for (size_t i = 0; i < this->MemAddrExprWidthsFromCallees[LoopNumPlusOne].size(); ++i) {
		if (this->NonStackFrameCalleeMemWrites[LoopNumPlusOne][i]) { // not just local stack frame write
			size_t MemWidth = this->MemAddrExprWidthsFromCallees[LoopNumPlusOne][i].second;
			STARSExpression *CalleeExpr = (*this->MemAddrExprWidthsFromCallees[LoopNumPlusOne][i].first);
			if (CalleeExpr->IsRelationalExpr()) {
				// Fixed diagnostic: previously mis-identified the enclosing function.
				SMP_msg("INFO: SPARK: Saved MemExprFromCallees relational expr in AggregateLoopMemExprs for LoopPlusOne: %zu Func: %s\n",
					LoopNumPlusOne, this->GetFuncName());
				// We have already computed the upper bound taking into account the ByteWidth, so pass
				//  a ByteWidth of 0 here to avoid double-adding the ByteWidth in EmitSPARKMemRange().
				this->SplitAndSaveRelationalExpr(true, LoopNumPlusOne, 0, CalleeExpr);
				continue;
			}
			InsertResult = this->TempNonRangeExprs.insert(CalleeExpr);
			if (InsertResult.second) {
				// Unique new expr.
				pair<size_t, STARSExprSetIter> InsertValue(MemWidth, InsertResult.first);
				this->TempNonRangeExprWidthIters.push_back(InsertValue);
			}
		}
	} // end for i in this->MemAddrExprWidthsFromCallees[LoopNumPlusOne]

	// Source (3b): memory writes whose addresses depend on incoming args.
	for (size_t i = 0; i < this->InArgsUsedInMemWriteByteWidths[LoopNumPlusOne].size(); ++i) {
		STARSExprSetIter InArgExprIter = this->InArgsUsedInMemWriteByteWidths[LoopNumPlusOne][i].first;
		STARSExpression *InArgExpr = (*InArgExprIter);
		size_t MemWidth = (size_t) this->InArgsUsedInMemWriteByteWidths[LoopNumPlusOne][i].second;
		if (InArgExpr->IsRelationalExpr()) {
			SMP_msg("INFO: SPARK: Saved InArg relational expr in AggregateLoopMemExprs for LoopPlusOne: %zu Func: %s\n",
				LoopNumPlusOne, this->GetFuncName());
			// We have already computed the upper bound taking into account the ByteWidth, so pass
			//  a ByteWidth of 0 here to avoid double-adding the ByteWidth in EmitSPARKMemRange().
			this->SplitAndSaveRelationalExpr(true, LoopNumPlusOne, 0, InArgExpr);
			continue;
		}
		// Use the already-dereferenced InArgExpr rather than re-dereferencing the iterator.
		InsertResult = this->TempNonRangeExprs.insert(InArgExpr);
		if (InsertResult.second) {
			// Unique new expr.
			pair<size_t, STARSExprSetIter> InsertValue(MemWidth, InsertResult.first);
			this->TempNonRangeExprWidthIters.push_back(InsertValue);
		}
	} // end for i in current LoopIndexPlusOne size()

	return;
} // end of SMPFunction::AggregateLoopMemExprs()

#if 0
// Disabled (#if 0) range-based variant: emit a postcondition (or loop
//  invariant, when HeaderFile is actually the SPARK body file) asserting
//  that memory outside the stack byte ranges written by loop LoopIndex is
//  unchanged. Superseded by the byte-at-a-time variant in the #else branch.
//  HeaderFile: output stream; body file => invariant form using X86.RSP,
//              header file => postcondition form using X86.RSP'Old.
//  LoopIndex:  index of the loop whose written stack bytes are summarized.
//  LoopAddr:   address of the loop's first instruction; used to obtain the
//              incoming stack pointer delta for offset normalization.
void SMPFunction::EmitSPARKStackMemRangePostCondition(FILE *HeaderFile, std::size_t LoopIndex, STARS_ea_t LoopAddr) {
	SMPInstr *FirstInst = this->GetInstFromAddr(LoopAddr);
	assert(nullptr != FirstInst);
	STARS_sval_t IncomingStackDelta = FirstInst->GetStackPtrOffset();
	// If the caller passed the body (source) file, we are emitting a loop invariant.
	bool LoopInvariantCase = (HeaderFile == global_STARS_program->GetSPARKSourceFile());
	bool FirstOutputEmitted = false;

	if (!LoopInvariantCase) {
		SMP_fprintf(HeaderFile, "\n\tand (for all i in Unsigned64 => (if ");
	}
	// Positive-offset bytes (at or above the incoming stack pointer):
	//  coalesce consecutive set bits into [LowestOffset, LimitOffset) ranges.
	if (this->PositiveOffsetStackBytesWrittenByLoop[LoopIndex].IsAnyBitSet()) {
		int FirstBitInRange = -1; // -1 means no range currently open
		for (size_t i = 0; i < this->PositiveOffsetStackBytesWrittenByLoop[LoopIndex].GetNumBits(); ++i) {
			if (this->PositiveOffsetStackBytesWrittenByLoop[LoopIndex].GetBit(i)) {
				if (0 > FirstBitInRange) {
					// We need to start a new range.
					FirstBitInRange = (int) i;
				}
			}
			else {
				if (0 <= FirstBitInRange) {
					// We are working on a range that is now ending.
					STARS_sval_t LowestOffset = (-IncomingStackDelta) + (STARS_sval_t) FirstBitInRange;
					STARS_sval_t LimitOffset = (-IncomingStackDelta) + (STARS_sval_t) i;
					assert(0 <= LowestOffset);
					if (FirstOutputEmitted) {
						SMP_fprintf(HeaderFile, "\n\t\tand ");
					}
					if (LoopInvariantCase) {
						if (0 != LowestOffset)
							SMP_fprintf(HeaderFile, "not X86.InMemoryRange(i, X86.RSP+%lld, X86.RSP+%lld)", (int64_t)LowestOffset, (int64_t)LimitOffset);
						else
							SMP_fprintf(HeaderFile, "not X86.InMemoryRange(i, X86.RSP, X86.RSP+%lld)", (int64_t)LimitOffset);
					}
					else {
						if (0 != LowestOffset)
							SMP_fprintf(HeaderFile, "not X86.InMemoryRange(i, X86.RSP'Old+%lld, X86.RSP'Old+%lld)", (int64_t)LowestOffset, (int64_t)LimitOffset);
						else
							SMP_fprintf(HeaderFile, "not X86.InMemoryRange(i, X86.RSP'Old, X86.RSP'Old+%lld)", (int64_t)LimitOffset);
					}
					FirstBitInRange = -1; // start new range
					FirstOutputEmitted = true;
				}
			}
		}
		// A range may still be open when the bitmap ends; close it at the limit.
		if (0 <= FirstBitInRange) {
			// We were working on a range that is now ending at the limit.
			STARS_sval_t LowestOffset = (-IncomingStackDelta) + (STARS_sval_t) FirstBitInRange;
			STARS_sval_t LimitOffset = (-IncomingStackDelta) + (STARS_sval_t) this->PositiveOffsetStackBytesWrittenByLoop[LoopIndex].GetNumBits();
			assert(0 <= LowestOffset);
			if (FirstOutputEmitted) {
				SMP_fprintf(HeaderFile, "\n\t\tand ");
			}
			if (LoopInvariantCase) {
				if (0 != LowestOffset)
					SMP_fprintf(HeaderFile, "not X86.InMemoryRange(i, X86.RSP+%lld, X86.RSP+%lld)", (int64_t)LowestOffset, (int64_t)LimitOffset);
				else
					SMP_fprintf(HeaderFile, "not X86.InMemoryRange(i, X86.RSP, X86.RSP+%lld)", (int64_t)LimitOffset);
			}
			else {
				if (0 != LowestOffset)
					SMP_fprintf(HeaderFile, "not X86.InMemoryRange(i, X86.RSP'Old+%lld, X86.RSP'Old+%lld)", (int64_t)LowestOffset, (int64_t)LimitOffset);
				else
					SMP_fprintf(HeaderFile, "not X86.InMemoryRange(i, X86.RSP'Old, X86.RSP'Old+%lld)", (int64_t)LimitOffset);
			}
			FirstOutputEmitted = true;
		}
	}

	// Negative-offset bytes (local frame, below the incoming stack pointer):
	//  offsets are biased by the full frame size so they come out negative.
	if (this->NegativeOffsetStackBytesWrittenByLoop[LoopIndex].IsAnyBitSet()) {
		int FirstBitInRange = -1; // -1 means no range currently open
		STARS_sval_t NumFrameBytes = (STARS_sval_t) this->NegativeOffsetStackBytesWrittenByLoop[LoopIndex].GetNumBits();
		for (size_t i = 0; i < this->NegativeOffsetStackBytesWrittenByLoop[LoopIndex].GetNumBits(); ++i) {
			if (this->NegativeOffsetStackBytesWrittenByLoop[LoopIndex].GetBit(i)) {
				if (0 > FirstBitInRange) {
					// We need to start a new range.
					FirstBitInRange = (int) i;
				}
			}
			else {
				if (0 <= FirstBitInRange) {
					// We are working on a range that is now ending.
					STARS_sval_t LowestOffset = (-NumFrameBytes - IncomingStackDelta) + (STARS_sval_t) FirstBitInRange;
					STARS_sval_t LimitOffset = (-NumFrameBytes - IncomingStackDelta) + (STARS_sval_t) i;
					if (FirstOutputEmitted) {
						SMP_fprintf(HeaderFile, "\n\t\tand ");
					}
					if (LoopInvariantCase) {
						if (0 != LowestOffset)
							SMP_fprintf(HeaderFile, "not X86.InMemoryRange(i, X86.RSP+%lld, X86.RSP+%lld)", (int64_t)LowestOffset, (int64_t)LimitOffset);
						else
							SMP_fprintf(HeaderFile, "not X86.InMemoryRange(i, X86.RSP, X86.RSP+%lld)", (int64_t)LimitOffset);
					}
					else {
						if (0 != LowestOffset)
							SMP_fprintf(HeaderFile, "not X86.InMemoryRange(i, X86.RSP'Old+%lld, X86.RSP'Old+%lld)", (int64_t)LowestOffset, (int64_t)LimitOffset);
						else
							SMP_fprintf(HeaderFile, "not X86.InMemoryRange(i, X86.RSP'Old, X86.RSP'Old+%lld)", (int64_t)LimitOffset);
					}
					FirstBitInRange = -1; // start new range
					FirstOutputEmitted = true;
				}
			}
		}
		// Trailing open range ends at the incoming stack pointer (X86.RSP itself).
		if (0 <= FirstBitInRange) {
			// We were working on a range that is now ending at the limit.
			STARS_sval_t LowestOffset = (-NumFrameBytes - IncomingStackDelta) + (STARS_sval_t) FirstBitInRange;
			if (FirstOutputEmitted) {
				SMP_fprintf(HeaderFile, "\n\t\tand ");
			}
			if (LoopInvariantCase) {
				SMP_fprintf(HeaderFile, "not X86.InMemoryRange(i, X86.RSP%lld, X86.RSP)", (int64_t)LowestOffset);
			}
			else {
				SMP_fprintf(HeaderFile, "not X86.InMemoryRange(i, X86.RSP'Old%lld, X86.RSP'Old)", (int64_t)LowestOffset);
			}
			FirstOutputEmitted = true;
		}
	}

	return;
} // end of SMPFunction::EmitSPARKStackMemRangePostCondition()

#else // print one byte address on the stack at a time instead of using range functions
// Active variant: emit a postcondition (or loop invariant, when HeaderFile
//  is actually the SPARK body file) excluding each stack byte address the
//  loop writes, one "(i /= X86.RSP+...)" term per byte, rather than using
//  range functions as in the disabled #if 0 variant above.
//  HeaderFile:       output stream; body file => loop-invariant form,
//                    header file => postcondition form.
//  LoopIndex:        index of the loop whose written stack bytes are emitted.
//  LoopAddr:         address of the loop's first instruction; provides the
//                    incoming stack pointer delta for offset normalization.
//  PostPrintStarted: true if an earlier postcondition term was already
//                    printed, so we must join with "and".
void SMPFunction::EmitSPARKStackMemRangePostCondition(FILE *HeaderFile, std::size_t LoopIndex, STARS_ea_t LoopAddr, bool PostPrintStarted) {
	SMPInstr *FirstInst = this->GetInstFromAddr(LoopAddr);
	assert(nullptr != FirstInst);
	STARS_sval_t IncomingStackDelta = FirstInst->GetStackPtrOffset();
	bool LoopInvariantCase = (HeaderFile == global_STARS_program->GetSPARKSourceFile()); // if HeaderFile is really the body file
	size_t OutputCount = 0;  // counter; do two outputs per line

	if (!LoopInvariantCase) {
		if (PostPrintStarted) {
			SMP_fprintf(HeaderFile, "\n\tand (for all i in Unsigned64 => (if ");
		}
		else {
			SMP_fprintf(HeaderFile, " (for all i in Unsigned64 => (if ");
		}
	}
	// Bytes at or above the incoming stack pointer: one inequality per set bit.
	if (this->PositiveOffsetStackBytesWrittenByLoop[LoopIndex].IsAnyBitSet()) {
		int FirstBitInRange = -1;
		for (size_t i = 0; i < this->PositiveOffsetStackBytesWrittenByLoop[LoopIndex].GetNumBits(); ++i) {
			if (this->PositiveOffsetStackBytesWrittenByLoop[LoopIndex].GetBit(i)) {
				if (0 != OutputCount) {
					if (0 == (OutputCount % 2)) { // printed multiple of 2, start new line
						SMP_fprintf(HeaderFile, " and\n\t\t");
					}
					else {
						SMP_fprintf(HeaderFile, " and ");
					}
				}
				else {
					SMP_fprintf(HeaderFile, "\n\t\t");
				}


				// Normalize bit index to an offset from the entry-point RSP.
				STARS_sval_t Offset = (-IncomingStackDelta) + (STARS_sval_t)i;
				assert(0 <= Offset);
				if (0 != Offset)
					SMP_fprintf(HeaderFile, "(i /= X86.RSP+16#%llx#)", (uint64_t) Offset);
				else
					SMP_fprintf(HeaderFile, "(i /= X86.RSP)");
				++OutputCount;
			}
		} // end for all positive offset stack bytes written
	}

	// Local-frame bytes below the incoming stack pointer: bias by frame size
	//  so the resulting offsets are negative (or zero).
	if (this->NegativeOffsetStackBytesWrittenByLoop[LoopIndex].IsAnyBitSet()) {
		STARS_sval_t NumFrameBytes = (STARS_sval_t) this->NegativeOffsetStackBytesWrittenByLoop[LoopIndex].GetNumBits();
		for (size_t i = 0; i < this->NegativeOffsetStackBytesWrittenByLoop[LoopIndex].GetNumBits(); ++i) {
			if (this->NegativeOffsetStackBytesWrittenByLoop[LoopIndex].GetBit(i)) {
				STARS_sval_t Offset = (-NumFrameBytes - IncomingStackDelta) + (STARS_sval_t) i;
				if (0 != OutputCount) {
					if (0 == (OutputCount % 2)) { // printed multiple of 2, start new line
						SMP_fprintf(HeaderFile, " and\n\t\t");
					}
					else {
						SMP_fprintf(HeaderFile, " and ");
					}
				}
				else {
					SMP_fprintf(HeaderFile, "\n\t\t");
				}

				// Emit +, -, or no offset depending on sign.
				if (0 < Offset)
					SMP_fprintf(HeaderFile, "(i /= X86.RSP+16#%llx#)", (uint64_t) Offset);
				else if (0 > Offset) 
					SMP_fprintf(HeaderFile, "(i /= X86.RSP-16#%llx#)", (uint64_t) (0 - Offset));
				else
					SMP_fprintf(HeaderFile, "(i /= X86.RSP)");
				++OutputCount;
			}
		} // end for all negative offset stack bytes written
	}

	// Symbolic (expression-based) stack ranges written by the loop: exclude
	//  them with InMemoryRange terms, biased back to entry-point RSP.
	if (this->LoopMemExprsExpandToStackOffsets[LoopIndex]) {
		STARS_sval_t StackDeltaBias = (-IncomingStackDelta);

		STARSExprBoundsIter ExprIter;
		for (ExprIter = this->GetFirstLoopMemWriteExprBoundsIter(LoopIndex); ExprIter != this->GetLastLoopMemWriteExprBoundsIter(LoopIndex); ++ExprIter) {
			STARSExpression *LowerExpr = ExprIter->first;
			STARSExpression *UpperExpr = ExprIter->second;
			bool StackExprFound = ((nullptr != LowerExpr) && (nullptr != UpperExpr))
				&& (LowerExpr->IsStackPtrPlusOffset() || UpperExpr->IsStackPtrPlusOffset());
			if (StackExprFound) {
				if (0 < OutputCount) {
					SMP_fprintf(HeaderFile, "\n\t\t and not X86.InMemoryRange(i, ");
				}
				else {
					SMP_fprintf(HeaderFile, "\n\t\t not X86.InMemoryRange(i, ");
				}
				LowerExpr->EmitSPARKAda(HeaderFile, false, !LoopInvariantCase, false, false, false);
				if (StackDeltaBias > 0) {
					SMP_fprintf(HeaderFile, "+ 16#%llx# ", (uint64_t)StackDeltaBias);
				}
				SMP_fprintf(HeaderFile, ", ");
				UpperExpr->EmitSPARKAda(HeaderFile, false, !LoopInvariantCase, false, false, false);
				if (StackDeltaBias > 0) {
					SMP_fprintf(HeaderFile, "+ 16#%llx# ", (uint64_t)StackDeltaBias);
				}
				SMP_fprintf(HeaderFile, ") ");
				++OutputCount;
			}
		} // end for all loop mem write exprs
	}

	return;
} // end of SMPFunction::EmitSPARKStackMemRangePostCondition()
#endif

// emit loop invariant for basic induction var bound
void SMPFunction::EmitSPARKAdaForLoopLimit(FILE *BodyFile, STARS_ea_t LoopAddr) {
	SMPBasicBlock *CurrBlock = this->GetBlockFromInstAddr(LoopAddr);
	assert(nullptr != CurrBlock);
	int LoopHeaderBlockNum = CurrBlock->GetNumber();
	int LoopNum = this->GetLoopNumFromHeaderBlockNum(LoopHeaderBlockNum);
	assert(0 <= LoopNum);
	assert(LoopNum < (int)this->LoopCount);
	size_t LoopIndex = (size_t) LoopNum;

	if ((this->LoopIterationsLimitExprs[LoopIndex] != nullptr) 
		&& (this->LoopAnalyzedBIVIters[LoopIndex] != this->LoopInductionVars[LoopIndex].end())) {
		// We analyzed the loop iterations count
	
		this->EmitSPARKLoopBIVLimits(BodyFile, LoopAddr, LoopIndex, false);

		// Print range limitation on memory writing.
		bitset<1 + MD_LAST_REG_NO> RegsPrinted; // record regs printed to avoid redundant output
		size_t DummyOutputCount = 1;
		STARSExprBoundsIter ExprIter;
		for (ExprIter = this->GetFirstLoopMemWriteExprBoundsIter(LoopIndex); ExprIter != this->GetLastLoopMemWriteExprBoundsIter(LoopIndex); ++ExprIter) {
			STARSExpression *LowerExpr = ExprIter->first;
			STARSExpression *UpperExpr = ExprIter->second;
			if ((nullptr != LowerExpr) && (nullptr != UpperExpr)) {
				LowerExpr->PrintSPARKArgLocationStrings(BodyFile, true, LoopIndex, DummyOutputCount, RegsPrinted);
				UpperExpr->PrintSPARKArgLocationStrings(BodyFile, true, LoopIndex, DummyOutputCount, RegsPrinted);
			}
		} // end for all loop mem write exprs
	}
	return;
} // end of SMPFunction::EmitSPARKAdaForLoopLimit()

// emit LoopRegSourceExprPairs
void SMPFunction::EmitIncomingLoopRegExprs(FILE *OutputFile, size_t LoopNum, bool LoopInvariantSection) {
	list<pair<STARS_regnum_t, STARSExprSetIter> >::const_iterator LoopRegIter;
	bool HasArgs = this->DoesLoopHaveArgs((int) LoopNum);
	for (LoopRegIter = this->LoopRegSourceExprPairs[LoopNum].cbegin(); LoopRegIter != this->LoopRegSourceExprPairs[LoopNum].cend(); ++LoopRegIter) {
		STARS_regnum_t RegNo = (*LoopRegIter).first;
		STARSExprSetIter RegSourceIter = (*LoopRegIter).second;
		STARSExpression *RegSourceExpr = (*RegSourceIter);
		STARS_ea_t DefAddr = RegSourceExpr->GetParentInst()->GetAddr();
		SMPInstr *DefInst = this->GetInstFromAddr(DefAddr);
		assert(nullptr != DefInst);
		STARSOpndTypePtr DefOp = DefInst->MakeRegOpnd(RegNo);
		bool UseFP = this->UsesFramePointer();
		if (LoopInvariantSection) {
			PrintSPARKIndentTabs(OutputFile);
			SMP_fprintf(OutputFile, "pragma Loop_Invariant(");
			DefInst->PrintSPARKAdaOperand(DefOp, OutputFile, false, UseFP, true, false, false);
			SMP_fprintf(OutputFile, " = (");
			RegSourceExpr->EmitSPARKAda(OutputFile, true, true, false, HasArgs, true, false);
			SMP_fprintf(OutputFile, "));\n");
		}
		else { // must be a precondition
			SMP_fprintf(OutputFile, " and\n");
			PrintSPARKIndentTabs(OutputFile);
			SMP_fprintf(OutputFile, "\t(");
			DefInst->PrintSPARKAdaOperand(DefOp, OutputFile, false, UseFP, true, false, false);
			SMP_fprintf(OutputFile, " = (");
			RegSourceExpr->EmitSPARKAda(OutputFile, true, false, false, HasArgs, false, false);
			SMP_fprintf(OutputFile, "))");
		}
	} // end for all this->LoopRegSourceExprPairs[LoopNum]
} // end of SMPFunction::EmitIncomingLoopRegExprs()

// Emit pragma Assume / pragma Loop_Invariant lines bounding the loop's
//  basic induction variable (BIV).
//  BodyFile:     SPARK body output stream.
//  LoopAddr:     address of the loop's first instruction; provides the
//                incoming stack pointer delta for RSP-relative expressions.
//  LoopIndex:    index of the loop being translated.
//  PragmaAssume: true => emit only "pragma Assume(lower < upper);" and
//                return; false => emit the Loop_Invariant bounds.
void SMPFunction::EmitSPARKLoopBIVLimits(FILE *BodyFile, STARS_ea_t LoopAddr, size_t LoopIndex, bool PragmaAssume) {
	SMPInstr *FirstInst = this->GetInstFromAddr(LoopAddr);
	STARS_sval_t IncomingStackDelta = FirstInst->GetStackPtrOffset();
	int LoopNum = (int) LoopIndex;
	// Negative increment => BIV counts down from init toward limit.
	bool CountdownLoop = (this->LoopIncrementValue[LoopIndex] < 0);
	bool UseFP = this->UsesFramePointer();
	bool HasArgs = this->DoesLoopHaveArgs(LoopNum);

	// Emit range limitation on the basic induction variable.
	bool SPRelative = false;
	STARSExpression *LowerLimitExpr = nullptr;

	PrintSPARKIndentTabs(BodyFile);

	if (PragmaAssume) {
		// Emit pragma Assume(lower_bound < upper_bound);
		STARSExpression *UpperLimitExpr = nullptr;
		// For a count-up loop the init expr is the lower bound and the limit
		//  expr the upper bound; for a countdown loop the roles swap.
		if (!CountdownLoop) {
			LowerLimitExpr = this->LoopIterationsInitExprs[LoopIndex];
			UpperLimitExpr = this->LoopIterationsLimitExprs[LoopIndex];
			// Look only at the right hand side of the relational LimitExpr.
			assert(UpperLimitExpr->IsRelationalExpr());
			UpperLimitExpr = UpperLimitExpr->CloneRHS();
		}
		else {
			LowerLimitExpr = this->LoopIterationsLimitExprs[LoopIndex];
			UpperLimitExpr = this->LoopIterationsInitExprs[LoopIndex];
			// Look only at the right hand side of the relational LimitExpr.
			assert(LowerLimitExpr->IsRelationalExpr());
			LowerLimitExpr = LowerLimitExpr->CloneRHS();
		}
		bool BothConstExprs = (LowerLimitExpr->IsConstExpr() && UpperLimitExpr->IsConstExpr());
		// No point in an Assume on two constants
		if (!BothConstExprs) {
			SPRelative = LowerLimitExpr->IsStackPtrRegUsed();
			SMP_fprintf(BodyFile, "pragma Assume(");
			if (SPRelative) {
				SMP_fprintf(BodyFile, "( ");
				LowerLimitExpr->EmitSPARKAda(BodyFile, true, false, false, HasArgs, false, false);
				// Offset the stack pointer from its entry point value in the func that includes this loop.
				SMP_fprintf(BodyFile, " + 16#%x# )", (0 - IncomingStackDelta));
			}
			else {
				LowerLimitExpr->EmitSPARKAda(BodyFile, true, false, false, HasArgs, false, false);
			}
			SMP_fprintf(BodyFile, " < ");

			SPRelative = UpperLimitExpr->IsStackPtrRegUsed();
			if (SPRelative) {
				SMP_fprintf(BodyFile, "( ");
				UpperLimitExpr->EmitSPARKAda(BodyFile, true, false, false, HasArgs, false, false);
				// Offset the stack pointer from its entry point value in the func that includes this loop.
				SMP_fprintf(BodyFile, " + 16#%x# )", (0 - IncomingStackDelta));
			}
			else {
				UpperLimitExpr->EmitSPARKAda(BodyFile, true, false, false, HasArgs, false, false);
			}
			SMP_fprintf(BodyFile, ");\n");
		}
		return; // the loop invariants will be done on a later call
	} // end if (PragmaAssume)

	// First, emit the restriction that the BIV is always >= its lower limit.
	STARSOpndTypePtr BIVOp = this->LoopAnalyzedBIVIters[LoopIndex]->BasicInductionVar.InductionVar.GetOp();
	SMP_fprintf(BodyFile, "pragma Loop_Invariant(");
	FirstInst->PrintSPARKAdaOperand(BIVOp, BodyFile, false, UseFP, true, false, false);
	if (!CountdownLoop) {
		SMP_fprintf(BodyFile, ">= ");
		LowerLimitExpr = this->LoopIterationsInitExprs[LoopIndex];
	}
	else {
		// Countdown loop might or might not execute with BIV == lowerbound.
		if (this->LoopExecutesWithLimitValue[LoopIndex])
			SMP_fprintf(BodyFile, ">= ");
		else
			SMP_fprintf(BodyFile, "> ");
		LowerLimitExpr = this->LoopIterationsLimitExprs[LoopIndex];
	}
	SPRelative = LowerLimitExpr->IsStackPtrRegUsed();
	if (SPRelative) {
		SMP_fprintf(BodyFile, "( ");
		LowerLimitExpr->EmitSPARKAda(BodyFile, true, false, false, HasArgs, false, false);
		// Offset the stack pointer from its entry point value in the func that includes this loop.
		SMP_fprintf(BodyFile, " + 16#%x# )", (0 - IncomingStackDelta));
	}
	else {
		LowerLimitExpr->EmitSPARKAda(BodyFile, true, false, false, HasArgs, false, false);
	}
	SMP_fprintf(BodyFile, ");\n");

	// See if we have a non-constant limit for the BIV. If so, print its value, 
	//  which will be found in the right hand side of the LimitExpr.
	//  Otherwise, use the constant limit.
	assert(LoopIndex < this->LoopComparisonExprs.size());
	assert(this->LoopIterationsLimitExprs[LoopIndex]->IsRelationalExpr());
	STARSOpndTypePtr LimitOp = this->LoopComparisonExprs[LoopIndex].Operand2.GetOp();
	assert(nullptr != LimitOp);
	PrintSPARKIndentTabs(BodyFile);
	SMP_fprintf(BodyFile, "pragma Loop_Invariant(");
	FirstInst->PrintSPARKAdaOperand(BIVOp, BodyFile, false, UseFP, true, false, false);
	// Choose the comparison operator by loop direction and whether the loop
	//  body executes when the BIV equals the limit value.
	if (!CountdownLoop) {
		if (this->LoopExecutesWithLimitValue[LoopIndex])
			SMP_fprintf(BodyFile, "<= ");
		else
			SMP_fprintf(BodyFile, "< ");
	}
	else {
		if (this->LoopExecutesWithLimitValue[LoopIndex])
			SMP_fprintf(BodyFile, ">= ");
		else
			SMP_fprintf(BodyFile, "> ");
	}
	FirstInst->PrintSPARKAdaOperand(LimitOp, BodyFile, false, UseFP, true, false, false);
	SMP_fprintf(BodyFile, ");\n");

	// Now, print a version of the LimitOp that has been traced back to its source value
	//  if LimitOp is not a constant.
	if (!LimitOp->IsImmedOp()) {
		PrintSPARKIndentTabs(BodyFile);
		SMP_fprintf(BodyFile, "pragma Loop_Invariant(");
		FirstInst->PrintSPARKAdaOperand(BIVOp, BodyFile, false, UseFP, true, false, false);
		if (!CountdownLoop) {
			if (this->LoopExecutesWithLimitValue[LoopIndex])
				SMP_fprintf(BodyFile, "<= ");
			else
				SMP_fprintf(BodyFile, "< ");
		}
		else {
			if (this->LoopExecutesWithLimitValue[LoopIndex])
				SMP_fprintf(BodyFile, ">= ");
			else
				SMP_fprintf(BodyFile, "> ");
		}

		// Deliberately shadows the outer SPRelative; scoped to this branch.
		bool SPRelative = false;
		if (this->LoopIterationsLimitExprs[LoopIndex]->HasRightSubTree()) {
			SPRelative = this->LoopIterationsLimitExprs[LoopIndex]->GetRightTree()->IsStackPtrRegUsed();
			if (SPRelative)
				SMP_fprintf(BodyFile, "( ");
			this->LoopIterationsLimitExprs[LoopIndex]->GetRightTree()->EmitSPARKAda(BodyFile, true, false, false, HasArgs, true, false);
		}
		else { // RightOperand
			const STARSOpndTypePtr LimitInitOp = this->LoopIterationsLimitExprs[LoopIndex]->GetConstRightOperand();
			SPRelative = (LimitInitOp->IsRegOp() && LimitInitOp->MatchesReg(MD_STACK_POINTER_REG));
			if (SPRelative)
				SMP_fprintf(BodyFile, "( ");
			FirstInst->PrintSPARKAdaOperand(LimitInitOp, BodyFile, false, UseFP, true, false, true);
		}
		if (SPRelative) {
			// Offset the stack pointer from its entry point value in the func that includes this loop.
			SMP_fprintf(BodyFile, " + 16#%x# )", (0 - IncomingStackDelta));
		}
		SMP_fprintf(BodyFile, ");\n");
	} // end if (!LimitOp->IsImmedOp())
	return;
} // end of SMPFunction::EmitSPARKLoopBIVLimits()

void SMPFunction::EmitAnalysisProblemWarnings(FILE *HeaderFile, int LoopIndex) {
	// Emit "-- WARNING:" comment lines for analysis problems that will make
	//  SPARK proofs fail. LoopIndex == -1 => the whole procedure (index 0 of
	//  the problem vectors); otherwise a loop procedure at index LoopIndex+1.
	assert(-1 <= LoopIndex);
	size_t LoopNumPlusOne = (size_t) (LoopIndex + 1);
	bool WholeProcedure = (LoopIndex < 0);

	if (WholeProcedure) {
		// Fold every loop's problem flags into the procedure-level entries at
		//  index 0, unless both are already set (nothing more to discover).
		bool AlreadyMarked = (this->SymbolicAnalysisProblems[0] && this->CalleeMemExprProblems[0]);
		if (!AlreadyMarked) {
			for (size_t LoopNum = 0; LoopNum < this->GetNumLoops(); ++LoopNum) {
				if (this->SymbolicAnalysisProblems[LoopNum + 1] || this->LoopAnalysisProblems[LoopNum])
					this->SymbolicAnalysisProblems[0] = true;
				if (this->CalleeMemExprProblems[LoopNum + 1])
					this->CalleeMemExprProblems[0] = true;
			}
		}
		if (this->SymbolicAnalysisProblems[0])
			SMP_fprintf(HeaderFile, "-- WARNING: Procedure had symbolic analysis problems.\n");
		if (this->CalleeMemExprProblems[0])
			SMP_fprintf(HeaderFile, "-- WARNING: Procedure had callee mem expr inheritance problems.\n");
	}
	else { // loop procedure
		if (this->LoopAnalysisProblems[(size_t) LoopIndex])
			SMP_fprintf(HeaderFile, "-- WARNING: Loop procedure had iteration or mem range analysis problems.\n");
		if (this->SymbolicAnalysisProblems[LoopNumPlusOne])
			SMP_fprintf(HeaderFile, "-- WARNING: Loop procedure had symbolic analysis problems.\n");
		if (this->CalleeMemExprProblems[LoopNumPlusOne])
			SMP_fprintf(HeaderFile, "-- WARNING: Loop procedure had callee mem expr inheritance problems.\n");
	}
	return;
} // end of SMPFunction::EmitAnalysisProblemWarnings()

// Emit SPARK Ada translation of function
void SMPFunction::EmitFuncSPARKAda(void) {
	if (this->IsLinkerStub()) {
		return;
	}

	string AdaFuncName(this->GetFuncName());
#if STARS_EMIT_ADA_FOR_MAIN_ONLY
	if (0 != strcmp("main", AdaFuncName.c_str())) {
		return;
	}
#endif
	if (IsStartupFuncName(AdaFuncName)) {
		return;
	}

	SMP_msg("INFO: Emitting SPARK for function %s\n", AdaFuncName.c_str());
	// Convert dashes to underscores in the func name to suit Ada standards.
	replace(AdaFuncName.begin(), AdaFuncName.end(), '-', '_');

	FILE *BodyFile = global_STARS_program->GetSPARKSourceFile();
	FILE *HeaderFile = global_STARS_program->GetSPARKHeaderFile();
	size_t ByteWidth = global_STARS_program->GetSTARS_ISA_Bytewidth();

	// Emit beginning of function body.
	this->TranslatingSPARKLoop = false;
	SMP_fprintf(BodyFile, "procedure %s\nis\n", AdaFuncName.c_str());
	SMP_fprintf(BodyFile, "\tSaveStackPtr : Unsigned64 := X86.RSP with Ghost;\n");
	SMP_fprintf(BodyFile, "\tra0 : Unsigned8 := X86.ReadMem8(X86.RSP) with Ghost;\n");
	for (unsigned short i = 1; i < ByteWidth; ++i) {
		SMP_fprintf(BodyFile, "\tra%u : Unsigned8 := X86.ReadMem8(X86.RSP + %u) with Ghost;\n", i, i);
	}
	this->EmitSPARKSavedArgs(BodyFile);
	SMP_fprintf(BodyFile, "\nbegin\n");
	SMP_fprintf(BodyFile, "\tpragma Assume(X86.RSP = X86.DummyRSP);\n");

	// Emit procedure specification.
	this->EmitAnalysisProblemWarnings(HeaderFile, -1);
	SMP_fprintf(HeaderFile, "procedure %s with\n", AdaFuncName.c_str());
	if (this->AltersSPARKMemory()) {
		SMP_fprintf(HeaderFile, "\tGlobal => (In_Out => (X86.Memory, X86.CarryFlag,\n");
	}
	else {
		SMP_fprintf(HeaderFile, "\tGlobal => (In_Out => (X86.CarryFlag,\n");
	}
	if (this->HasCallToNonReturningFunc() || this->HasCalleeChainWithNonReturningFunc()) {
		SMP_fprintf(HeaderFile, "\t\tX86.OverflowFlag, X86.ParityFlag, X86.SignFlag, X86.ZeroFlag, X86.Exit_Called,\n");
	}
	else {
		SMP_fprintf(HeaderFile, "\t\tX86.OverflowFlag, X86.ParityFlag, X86.SignFlag, X86.ZeroFlag,\n");
	}

	size_t OutCount = 0;
	size_t RegCount = this->OutputRegs.count();
	bool FPStackBelowTopPrinted = false;
	for (size_t RegIndex = 0; RegIndex < this->OutputRegs.size(); ++RegIndex) {
		bool FlagsReg = (RegIndex == (size_t) MD_FLAGS_REG);
		if (FlagsReg || (!(this->OutputRegs[RegIndex] || this->CalleePreservedRegs[RegIndex]))) {
			continue; // flag, or bit not set
		}
		bool CommaPrinted = false;
		bool FPRegBelowStackTop = ((RegIndex >= STARS_x86_R_st1) && (RegIndex <= STARS_x86_R_st7));
		if (!(FPRegBelowStackTop && FPStackBelowTopPrinted)) { // avoid duplicate X86.FloatingPointStackDummy1
			if (0 == OutCount) { // first line does not need another newline
				SMP_fprintf(HeaderFile, "\t\t");
				CommaPrinted = true; // printed a comma after we printed flags
			}
			else if (0 == (OutCount % 4)) {  // print 4 regs per line
				SMP_fprintf(HeaderFile, ",\n\t\t");
				CommaPrinted = true;
			}
			++OutCount;
			if (CommaPrinted) {
				SMP_fprintf(HeaderFile, "X86.%s ", MDGetSPARKRegNumName((STARS_regnum_t)RegIndex, (uint16_t)ByteWidth));
			}
			else {
				SMP_fprintf(HeaderFile, ", X86.%s ", MDGetSPARKRegNumName((STARS_regnum_t)RegIndex, (uint16_t)ByteWidth));
			}
			FPStackBelowTopPrinted |= FPRegBelowStackTop;
		}
	} // end for all entries in the OutputRegs set.
	SMP_fprintf(HeaderFile, ")"); // close In_Out list parenthesis

	OutCount = 0;
	RegCount = this->InputRegs.count();
	if (RegCount > 0) {
		for (size_t RegIndex = 0; RegIndex < this->InputRegs.size(); ++RegIndex) {
			bool FlagsReg = (RegIndex == (size_t) MD_FLAGS_REG);
			if (FlagsReg || this->OutputRegs[RegIndex] || this->CalleePreservedRegs[RegIndex] || (!this->InputRegs[RegIndex])) {
				continue; // flag, or bit not set, or already handled as In_Out
			}
			bool FPRegBelowStackTop = ((RegIndex >= STARS_x86_R_st1) && (RegIndex <= STARS_x86_R_st7));
			if (!(FPRegBelowStackTop && FPStackBelowTopPrinted)) { // avoid duplicate X86.FloatingPointStackDummy1
				bool CommaPrinted = false;
				if (0 == OutCount) { // first line needs Input =>
					if (this->AltersSPARKMemory()) {
						SMP_fprintf(HeaderFile, ",\n\t\tInput => (");
					}
					else {
						SMP_fprintf(HeaderFile, ",\n\t\tInput => (X86.Memory, ");
					}
					CommaPrinted = true; // suppress comma before first register
				}
				else if (0 == (OutCount % 4)) {  // print 4 regs per line
					SMP_fprintf(HeaderFile, ",\n\t\t");
					CommaPrinted = true;
				}
				++OutCount;
				if (CommaPrinted) {
					SMP_fprintf(HeaderFile, "X86.%s ", MDGetSPARKRegNumName((STARS_regnum_t)RegIndex, (uint16_t)ByteWidth));
				}
				else {
					SMP_fprintf(HeaderFile, ", X86.%s ", MDGetSPARKRegNumName((STARS_regnum_t)RegIndex, (uint16_t)ByteWidth));
				}
				FPStackBelowTopPrinted |= FPRegBelowStackTop;
			}
		} // end for all entries in the InputRegs set.
		if (OutCount > 0) { // last reg needs no comma after it, but must close inner parentheses
			SMP_fprintf(HeaderFile, ")");
		}
		else if (!this->AltersSPARKMemory()) {
			SMP_fprintf(HeaderFile, ",\n\t\tInput => (X86.Memory)");
		}
	}
	SMP_fprintf(HeaderFile, "),\n"); // close Globals section parenthesis

	// Emit memory writing range pre-conditions and post-conditions.
	bool HasLoopMemWrites = ((this->LoopCount > 0) && this->MemRangeRegsBitmap.any());
	this->EmitSPARKProcPrePostMemConditions(BodyFile, HeaderFile, true);
	this->EmitSPARKProcPrePostMemConditions(BodyFile, HeaderFile, false);

	// Begin recursive descent translation with entry block.
	SMPBasicBlock *CurrBlock = this->RPOBlocks[0];
	this->ResetProcessedBlocks(); // no block has been translated yet
#if 0
	int FollowBlockNum = this->FindFollowBlockNum(CurrBlock, false);
#else
	int FollowBlockNum = SMP_BLOCKNUM_UNINIT;
#endif
	this->EmitSPARKAdaForBlock(0, FollowBlockNum, BodyFile, false);

	SMP_fprintf(BodyFile, "\nend %s;\n\n", AdaFuncName.c_str());
	STARS_SPARK_IndentCount = 1;

	// Translate the loops to procedures
	this->TranslatingSPARKLoop = true;
	list<pair<int, pair<int, int> > >::iterator WorkListIter = this->SPARKLoopWorkList.begin();
	while (WorkListIter != this->SPARKLoopWorkList.end()) {
		int LoopIndex = WorkListIter->first;
		int HeaderBlockNum = WorkListIter->second.first;
		FollowBlockNum = WorkListIter->second.second;
		this->ResetProcessedBlocks(); // no block has been translated yet on this WorkList iteration
		// Emit the procedure specification, up through pre-conditions, into HeaderFile and begin the proc in BodyFile.
		string ProcName = this->EmitSPARKProcForLoopHeaderBlock(LoopIndex, HeaderBlockNum, FollowBlockNum, BodyFile, HeaderFile);
		// Emit the SPARK code for the loop.
		this->EmitSPARKAdaForBlock(HeaderBlockNum, FollowBlockNum, BodyFile, false, true);
		// Terminate the procedure emitted for the loop.
		SMP_fprintf(BodyFile, "\nend %s;\n\n", ProcName.c_str());
		STARS_SPARK_IndentCount = 1;

		this->SPARKLoopWorkList.pop_front();
		WorkListIter = this->SPARKLoopWorkList.begin();
	}

	this->TranslatingSPARKLoop = false;

	// Audit completeness of translation.
	bool TranslationComplete = true;
	for (list<SMPInstr *>::iterator InstIter = this->GetFirstInstIter(); InstIter != this->GetLastInstIter(); ++InstIter) {
		SMPInstr *CurrInst = (*InstIter);
		if (!CurrInst->HasBeenTranslatedToSPARK() && (!CurrInst->IsMarkerInst()) && (!CurrInst->IsNop())) {
			STARS_ea_t InstAddr = CurrInst->GetAddr();
			SMPBasicBlock *CurrBlock = this->GetBlockFromInstAddr(InstAddr);
			assert(nullptr != CurrBlock);
			SMP_fprintf(BodyFile, "ERROR: Instruction at %llx not translated.\n", (uint64_t)InstAddr);
			TranslationComplete = false;
		}
	}
	if (!TranslationComplete) {
		SMP_msg("ERROR: Incomplete SPARK Ada translation for %s\n", this->GetFuncName());
		this->Dump();
	}

	return;
} // end of SMPFunction::EmitFuncSPARKAda()

#if 0
// Emit mem writing ranges for pre- and post-conditions
// LEGACY VERSION: compiled out by the surrounding #if 0 / #else; the active
//  replacement is in the #else branch below. Kept for reference only.
// Emits the memory-safety clauses of the SPARK Ada contract for this function:
//  the "Pre =>" section when PreconditionSection is true, otherwise the
//  "Post =>" section. All contract text goes to HeaderFile; BodyFile is unused.
void SMPFunction::EmitSPARKProcPrePostMemConditions(FILE *BodyFile, FILE *HeaderFile, bool PreconditionSection) {
	size_t ByteWidth = global_STARS_program->GetSTARS_ISA_Bytewidth();
	// Booleans that select which clause groups get emitted below.
	bool HasLoopMemWrites = false;
	bool HasNonLoopInArgMemWrites = false;
	bool HasCalleeMemWrites = false;
	bool HasCalleeLoopInArgMemWrites = this->CalleeUsesInArgsForLoopMemWrites();
	bool HasCalleeLoopMemWrites = false; // any source, called from inside or outside a loop
	bool NeedsPreconditions = false;
	size_t OutputCount = 0; // number of contract terms emitted so far; drives connective printing

	if (PreconditionSection) {
		// NeedsSPARKPreconditions() fills in the Has* flags as out-params.
		NeedsPreconditions = this->NeedsSPARKPreconditions(HasLoopMemWrites, HasNonLoopInArgMemWrites, HasCalleeMemWrites, HasCalleeLoopMemWrites);
		if (!NeedsPreconditions) {
			return; // no writes outside the local stack frame; no Pre section needed
		}
	}
	else { // post-conditions
		// Determine booleans that drive output
		HasCalleeLoopMemWrites = this->HasMemExprsFromCalleeLoops;
		for (size_t LoopIndex = 0; LoopIndex < this->LoopCount; ++LoopIndex) {
			if (this->NonStackFrameLoopMemWrites[LoopIndex]) {
				HasLoopMemWrites = true;
				break;
			}
		}
		HasNonLoopInArgMemWrites = (!this->InArgsUsedInMemWrites[0].empty());
		for (size_t LoopNumPlusOne = 0; LoopNumPlusOne <= this->GetNumLoops(); ++LoopNumPlusOne) {
			// NOTE(review): the break only exits the inner loop; the outer scan continues,
			//  which is harmless but slightly wasteful once HasCalleeMemWrites is true.
			for (size_t i = 0; i < this->MemAddrExprWidthsFromCallees[LoopNumPlusOne].size(); ++i) {
				if (this->NonStackFrameCalleeMemWrites[LoopNumPlusOne][i]) { // not just local stack frame write
					HasCalleeMemWrites = true;
					break;
				}
			}
		}

		// Start the post-condition section with items not shared with the pre-condition logic.
		if (this->HasCalleeChainWithNonReturningFunc() || this->HasCallToNonReturningFunc()) {
			SMP_fprintf(HeaderFile, "\tPost => X86.Exit_Called or else ");
		}
		else {
			SMP_fprintf(HeaderFile, "\tPost => ");
		}
		if (this->AltersSPARKMemory()) {
			// Standard epilogue contract: RSP is popped by 8 and the 8 bytes of the
			//  return address slot are unchanged on exit.
			SMP_fprintf(HeaderFile, "(X86.RSP = (X86.RSP'Old + 8)) and\n");
			SMP_fprintf(HeaderFile, "\t\t(X86.Memory(X86.RSP'Old) = X86.Memory'Old(X86.RSP'Old)) and\n");
			SMP_fprintf(HeaderFile, "\t\t(X86.Memory(X86.RSP'Old + 1) = X86.Memory'Old(X86.RSP'Old + 1)) and\n");
			SMP_fprintf(HeaderFile, "\t\t(X86.Memory(X86.RSP'Old + 2) = X86.Memory'Old(X86.RSP'Old + 2)) and\n");
			SMP_fprintf(HeaderFile, "\t\t(X86.Memory(X86.RSP'Old + 3) = X86.Memory'Old(X86.RSP'Old + 3)) and\n");
			SMP_fprintf(HeaderFile, "\t\t(X86.Memory(X86.RSP'Old + 4) = X86.Memory'Old(X86.RSP'Old + 4)) and\n");
			SMP_fprintf(HeaderFile, "\t\t(X86.Memory(X86.RSP'Old + 5) = X86.Memory'Old(X86.RSP'Old + 5)) and\n");
			SMP_fprintf(HeaderFile, "\t\t(X86.Memory(X86.RSP'Old + 6) = X86.Memory'Old(X86.RSP'Old + 6)) and\n");
			SMP_fprintf(HeaderFile, "\t\t(X86.Memory(X86.RSP'Old + 7) = X86.Memory'Old(X86.RSP'Old + 7))");
			OutputCount = 9;
		}
		else {
			SMP_fprintf(HeaderFile, "(X86.RSP = (X86.RSP'Old + 8))");
			++OutputCount;
		}
		if (this->PreservedRegsBitmap.any()) {
			// Emit (reg = reg'Old) for each callee-preserved general purpose register.
			size_t RegLimit = this->PreservedRegsBitmap.size();
			if (RegLimit > STARS_x86_R_r15) {
				RegLimit = STARS_x86_R_r15; // last full-width general purpose reg
			}
			for (size_t RegIndex = 0; RegIndex <= RegLimit; ++RegIndex) {
				if (this->IsRegPreserved(RegIndex)) {
					const char *RegName = MDGetSPARKRegNumName((STARS_regnum_t)RegIndex, ByteWidth);
					SMP_fprintf(HeaderFile, " and\n\t\t(X86.%s = X86.%s'Old)", RegName, RegName);
					++OutputCount;
				}
			}
		}
	}

	// Open the Pre section, or the safe-region quantifier of the Post section.
	if (HasLoopMemWrites || HasNonLoopInArgMemWrites || HasCalleeMemWrites || HasCalleeLoopInArgMemWrites || HasCalleeLoopMemWrites) {
		if (PreconditionSection)
			SMP_fprintf(HeaderFile, "\tPre => (");
		else
			SMP_fprintf(HeaderFile, "\tand \n\t(for all i in Unsigned64 => (if X86.InSafeRegion64(i, X86.RSP'Old)");
	}
	else if (!PreconditionSection) {
		// No mem-write clauses at all: close the Post section here.
		if (this->IsPossibleIndirectCallTarget() || this->AltersSPARKMemory()) {
			SMP_fprintf(HeaderFile, "\tand \n\t(for all i in Unsigned64 => (if X86.InSafeRegion64(i, X86.RSP'Old)");
			SMP_fprintf(HeaderFile, " then\n\t\t(X86.Memory(i) = X86.Memory'Old(i))));\n\n");
		}
		else {
			SMP_fprintf(HeaderFile, ";\n\n"); // terminate postcondition section
		}
	}

	if (!HasNonLoopInArgMemWrites) {
		if (HasLoopMemWrites) {
			if (PreconditionSection)
				SMP_fprintf(HeaderFile, "for all i in Unsigned64 => (if X86.InMemoryRange(i, ");

			for (size_t LoopIndex = 0; LoopIndex < this->LoopCount; ++LoopIndex) {
				if (this->NonStackFrameLoopMemWrites[LoopIndex]) { // not just local var write
					this->EmitSPARKMemRange(HeaderFile, PreconditionSection, this->LoopMemWriteLowerBoundsExprs[LoopIndex], this->LoopMemWriteUpperBoundsExprs[LoopIndex], OutputCount);
				}
			}
			if (PreconditionSection)
				SMP_fprintf(HeaderFile, " then\n\t\tX86.InSafeRegion64(i, X86.RSP))"); // terminate universal quantifier section
		}
		if (HasCalleeMemWrites) {
			for (size_t LoopNumPlusOne = 0; LoopNumPlusOne <= this->GetNumLoops(); ++LoopNumPlusOne) {
				this->EmitSPARKPrePostMemConditionsFromCalleeNonLoopWrites(HeaderFile, PreconditionSection, LoopNumPlusOne, OutputCount);
			}
		}
		if (HasCalleeLoopMemWrites) {
			this->EmitSPARKPrePostMemConditionsFromCalleeLoops(HeaderFile, PreconditionSection, OutputCount);
		}
		if (HasLoopMemWrites || HasCalleeMemWrites || HasCalleeLoopInArgMemWrites || HasCalleeLoopMemWrites) {
			if (PreconditionSection)
				SMP_fprintf(HeaderFile, "),\n"); // terminate precondition section
			else // terminate the postcondition section
				SMP_fprintf(HeaderFile, " then\n\t\t(X86.Memory(i) = X86.Memory'Old(i))));\n\n");
		}
	}
	else { // HasNonLoopInArgMemWrites is true
		if (HasLoopMemWrites) {
			if (PreconditionSection)
				SMP_fprintf(HeaderFile, "for all i in Unsigned64 => (if X86.InMemoryRange(i, ");

			for (size_t LoopIndex = 0; LoopIndex < this->LoopCount; ++LoopIndex) {
				if (this->NonStackFrameLoopMemWrites[LoopIndex]) { // not just local var write
					this->EmitSPARKMemRange(HeaderFile, PreconditionSection, this->LoopMemWriteLowerBoundsExprs[LoopIndex], this->LoopMemWriteUpperBoundsExprs[LoopIndex], OutputCount);
				}
			}
			if (PreconditionSection)
				SMP_fprintf(HeaderFile, " then\n\t\tX86.InSafeRegion64(i, X86.RSP))"); // terminate universal quantifier section
		}

		// NonLoopInArgMemWrites
		STARSExpression *PreviousExpr = nullptr; // last expr emitted, to suppress consecutive duplicates
		for (size_t LoopIndexPlusOne = 0; LoopIndexPlusOne <= this->GetNumLoops(); ++LoopIndexPlusOne) {
			for (size_t i = 0; i < this->InArgsUsedInMemWriteByteWidths[LoopIndexPlusOne].size(); ++i) {
				STARSExprSetIter InArgExprIter = this->InArgsUsedInMemWriteByteWidths[LoopIndexPlusOne][i].first;
				STARSExpression *InArgExpr = (*InArgExprIter);
				// Reduce duplicate output.
				if (nullptr == PreviousExpr) {
					PreviousExpr = InArgExpr;
				}
				else if (PreviousExpr->IsEqualExpr(InArgExpr)) {
					continue;
				}
				else {
					PreviousExpr = InArgExpr;
				}

				// Handle memory ranges in the LoopMemWrites section above.
				if (InArgExpr->IsRelationalExpr())
					continue;

				// Easier in this loop to use X86.InRange64(i, X86.RAX, 8) than to use X86.InMemoryRange(i, X86.RAX, X86.RAX+8)
				//  which would require printing the same operand twice.
				if (0 < OutputCount) {
					if (PreconditionSection)
						SMP_fprintf(HeaderFile, " and ");
					else
						SMP_fprintf(HeaderFile, "\n\t\t and not X86.InRange64(i, ");
				}
				else {
					assert(PreconditionSection);
				}
				size_t MemWidth = (size_t) this->InArgsUsedInMemWriteByteWidths[LoopIndexPlusOne][i].second;
				if (PreconditionSection) {
					InArgExpr->EmitSPARKInSafeRegion64(HeaderFile, MemWidth);
				}
				else {
					InArgExpr->EmitSPARKAda(HeaderFile, false, !PreconditionSection, false, false);
					// NOTE(review): MemWidth is size_t; "%u" is a mismatched format on LP64 —
					//  should be "%zu" if this legacy version is ever re-enabled.
					SMP_fprintf(HeaderFile, ", %u)", MemWidth);
				}
				++OutputCount;
			} // end for i in current LoopIndexPlusOne size()
		} // end for LoopIndexPlusOne

		if (HasCalleeMemWrites) {
			for (size_t LoopNumPlusOne = 0; LoopNumPlusOne <= this->GetNumLoops(); ++LoopNumPlusOne) {
				this->EmitSPARKPrePostMemConditionsFromCalleeNonLoopWrites(HeaderFile, PreconditionSection, LoopNumPlusOne, OutputCount);
			}
		}
		if (HasCalleeLoopMemWrites) {
			this->EmitSPARKPrePostMemConditionsFromCalleeLoops(HeaderFile, PreconditionSection, OutputCount);
		}

		if (PreconditionSection)
			SMP_fprintf(HeaderFile, "),\n"); // terminate precondition section
		else // terminate the postcondition section
			SMP_fprintf(HeaderFile, " then\n\t\t(X86.Memory(i) = X86.Memory'Old(i))));\n\n");
	} // end if (!HasNonLoopInArgMemWrites) ... else ...

	return;
} // end of SMPFunction::EmitSPARKProcPrePostMemConditions()

#else
// Emit mem writing ranges for pre- and post-conditions
// Emits the memory-safety clauses of the SPARK Ada contract for this function:
//  the "Pre =>" section when PreconditionSection is true, otherwise the
//  "Post =>" section. All contract text goes to HeaderFile; BodyFile is unused
//  here but kept for signature compatibility with callers.
// Fixes in this revision:
//  - The NonStackFrameAccess flag was inverted (missing '!'), causing local
//    stack-frame ranges to be emitted and non-local ranges to be skipped;
//    NeedsSPARKPreconditions() uses !IsLocalStackFrameExprPair() for the same test.
//  - size_t MemWidth was printed with "%u" (UB on LP64); now "%zu".
void SMPFunction::EmitSPARKProcPrePostMemConditions(FILE *BodyFile, FILE *HeaderFile, bool PreconditionSection) {
	size_t ByteWidth = global_STARS_program->GetSTARS_ISA_Bytewidth();
	// Booleans that select which clause groups get emitted below.
	bool HasLoopMemWrites = false;
	bool HasNonLoopInArgMemWrites = false;
	bool HasCalleeMemWrites = false;
	bool HasCalleeLoopInArgMemWrites = this->CalleeUsesInArgsForLoopMemWrites();
	bool HasCalleeLoopMemWrites = false; // any source, called from inside or outside a loop
	bool NeedsPreconditions = false;
	size_t OutputCount = 0; // number of contract terms emitted so far; drives connective printing

	if (PreconditionSection) {
		// NeedsSPARKPreconditions() fills in the Has* flags as out-params.
		NeedsPreconditions = this->NeedsSPARKPreconditions(HasLoopMemWrites, HasNonLoopInArgMemWrites, HasCalleeMemWrites, HasCalleeLoopMemWrites);
		if (!NeedsPreconditions) {
			return; // no writes outside the local stack frame; no Pre section needed
		}
	}
	else { // post-conditions
		// Determine booleans that drive output
		HasCalleeLoopMemWrites = this->HasMemExprsFromCalleeLoops;
		for (size_t LoopIndex = 0; LoopIndex < this->LoopCount; ++LoopIndex) {
			if (this->NonStackFrameLoopMemWrites[LoopIndex]) {
				HasLoopMemWrites = true;
				break;
			}
		}
		HasNonLoopInArgMemWrites = (!this->InArgsUsedInMemWrites[0].empty());
		for (size_t LoopNumPlusOne = 0; LoopNumPlusOne <= this->GetNumLoops(); ++LoopNumPlusOne) {
			for (size_t i = 0; i < this->MemAddrExprWidthsFromCallees[LoopNumPlusOne].size(); ++i) {
				if (this->NonStackFrameCalleeMemWrites[LoopNumPlusOne][i]) { // not just local stack frame write
					HasCalleeMemWrites = true;
					break;
				}
			}
		}

		// Start the post-condition section with items not shared with the pre-condition logic.
		if (this->HasCalleeChainWithNonReturningFunc() || this->HasCallToNonReturningFunc()) {
			SMP_fprintf(HeaderFile, "\tPost => X86.Exit_Called or else ");
		}
		else {
			SMP_fprintf(HeaderFile, "\tPost => ");
		}
		if (this->AltersSPARKMemory()) {
			// Standard epilogue contract: RSP is popped by 8 and the 8 bytes of the
			//  return address slot are unchanged on exit.
			SMP_fprintf(HeaderFile, "(X86.RSP = (X86.RSP'Old + 8)) and\n");
			SMP_fprintf(HeaderFile, "\t\t(X86.Memory(X86.RSP'Old) = X86.Memory'Old(X86.RSP'Old)) and\n");
			SMP_fprintf(HeaderFile, "\t\t(X86.Memory(X86.RSP'Old + 1) = X86.Memory'Old(X86.RSP'Old + 1)) and\n");
			SMP_fprintf(HeaderFile, "\t\t(X86.Memory(X86.RSP'Old + 2) = X86.Memory'Old(X86.RSP'Old + 2)) and\n");
			SMP_fprintf(HeaderFile, "\t\t(X86.Memory(X86.RSP'Old + 3) = X86.Memory'Old(X86.RSP'Old + 3)) and\n");
			SMP_fprintf(HeaderFile, "\t\t(X86.Memory(X86.RSP'Old + 4) = X86.Memory'Old(X86.RSP'Old + 4)) and\n");
			SMP_fprintf(HeaderFile, "\t\t(X86.Memory(X86.RSP'Old + 5) = X86.Memory'Old(X86.RSP'Old + 5)) and\n");
			SMP_fprintf(HeaderFile, "\t\t(X86.Memory(X86.RSP'Old + 6) = X86.Memory'Old(X86.RSP'Old + 6)) and\n");
			SMP_fprintf(HeaderFile, "\t\t(X86.Memory(X86.RSP'Old + 7) = X86.Memory'Old(X86.RSP'Old + 7))");
			OutputCount = 9;
		}
		else {
			SMP_fprintf(HeaderFile, "(X86.RSP = (X86.RSP'Old + 8))");
			++OutputCount;
		}
		if (this->PreservedRegsBitmap.any()) {
			// Emit (reg = reg'Old) for each preserved general purpose register.
			size_t RegLimit = this->PreservedRegsBitmap.size();
			if (RegLimit > STARS_x86_R_r15) {
				RegLimit = STARS_x86_R_r15; // last full-width general purpose reg
			}
			for (size_t RegIndex = 0; RegIndex <= RegLimit; ++RegIndex) {
				// In addition to our own PreservedRegs, a reg could be Preserved by not being written,
				//  and all callees preserving it, in which case it is In_Out but does not need to be
				//  explicitly preserved in this function and will not be in the PreservedRegs bitset.
				bool PreservedByCalleesOnly = ((!this->OutputRegs[RegIndex]) && this->CalleePreservedRegs[RegIndex]);
				if (this->IsRegPreserved(RegIndex) || PreservedByCalleesOnly) {
					const char *RegName = MDGetSPARKRegNumName((STARS_regnum_t)RegIndex, ByteWidth);
					SMP_fprintf(HeaderFile, " and\n\t\t(X86.%s = X86.%s'Old)", RegName, RegName);
					++OutputCount;
				}
			}
		}
	}

	// Use sets of exprs to avoid duplication and streamline the output section.
	STARSExprSet LowerBoundsExprs;
	STARSExprSet UpperBoundsExprs;
	STARSExprSet NonRangeExprs;
	pair<STARSExprSetIter, bool> InsertResult, InsertResult2;
	vector<pair<STARSExprSetIter, STARSExprSetIter> > RangeExprWidthIters;
	vector<pair<size_t, STARSExprSetIter> > NonRangeExprWidthIters;

	// Collect de-duplicated [lower, upper) bounds for loop memory writes that
	//  touch memory outside this function's own stack frame.
	for (size_t LoopIndex = 0; LoopIndex < this->LoopCount; ++LoopIndex) {
		STARSExprBoundsIter ExprIter;
		for (ExprIter = this->GetFirstLoopMemWriteExprBoundsIter(LoopIndex); ExprIter != this->GetLastLoopMemWriteExprBoundsIter(LoopIndex); ++ExprIter) {
			STARSExpression *LowerExpr = ExprIter->first;
			STARSExpression *UpperExpr = ExprIter->second;
			// FIXED: negation was missing, which inverted the filter below. A pair that is
			//  purely a local stack frame access needs no contract clause; everything else does
			//  (matches the !IsLocalStackFrameExprPair() test in NeedsSPARKPreconditions()).
			bool NonStackFrameAccess = (!IsLocalStackFrameExprPair(LowerExpr, UpperExpr));

			if (NonStackFrameAccess) { // not just local var write
				InsertResult = LowerBoundsExprs.insert(LowerExpr);
				InsertResult2 = UpperBoundsExprs.insert(UpperExpr);
				if (InsertResult.second || InsertResult2.second) {
					// New range, because at least one insert was not a duplicate.
					pair<STARSExprSetIter, STARSExprSetIter> InsertValue(InsertResult.first, InsertResult2.first);
					RangeExprWidthIters.push_back(InsertValue);
				}
			}
		} // end for all loop mem write exprs
	}

	// Two cases. (1) From outside any loop, we call foo() and it has writes inside a loop.
	//  (2) From inside a loop, we call foo() and it has writes inside a loop.
	// Case (2) is more complex, as we need to analyze the worst-case range of mem addresses
	//  given that we have nested loops, albeit across procedure boundaries.
	// Case (1) is found in index 0 of vector this->LoopMemAddrExprsFromCalleeLoops[] while
	//  case (2) is found at index [LoopIndex + 1] in the same vector.
	size_t IndexLimit = this->LoopMemAddrExprWidthsFromCalleeLoops.size();
	for (size_t LoopNumPlusOne = 0; LoopNumPlusOne < IndexLimit; ++LoopNumPlusOne) {
		list<std::pair<STARSExprSetIter, size_t> >::const_iterator MemExprIter;
		for (MemExprIter = this->LoopMemAddrExprWidthsFromCalleeLoops[LoopNumPlusOne].cbegin(); MemExprIter != this->LoopMemAddrExprWidthsFromCalleeLoops[LoopNumPlusOne].cend(); ++MemExprIter) {
			pair<STARSExprSetIter, size_t> MemExprItem = (*MemExprIter);
			size_t MemWidth = MemExprItem.second;
			STARSExprSetIter ExprSetIter = MemExprItem.first;
			if ((*ExprSetIter)->IsStackPtrPlusOffset())
				continue; // local stack access; no clause needed
			else if ((*ExprSetIter)->IsRelationalExpr()) {
				SMP_msg("INFO: SPARK: Saved LoopMemAddrFromCallees relational expr in EmitSPARKProcPrePostMemConditions for LoopPlusOne: %zu Func: %s\n",
					LoopNumPlusOne, this->GetFuncName());
				// We have already computed the upper bound taking into account the ByteWidth, so pass
				//  a ByteWidth of 0 here to avoid double-adding the ByteWidth in EmitSPARKMemRange().
				this->SplitAndSaveRelationalExpr(true, LoopNumPlusOne, 0, (*ExprSetIter));
				continue;
			}
			else { // non-range expr
				InsertResult = NonRangeExprs.insert(*ExprSetIter);
				if (InsertResult.second) {
					// Unique new expr.
					pair<size_t, STARSExprSetIter> InsertValue(MemWidth, InsertResult.first);
					NonRangeExprWidthIters.push_back(InsertValue);
				}
			}
		} // end for MemExprIter from LoopMemAddrExprWidthsFromCalleeLoops

		for (size_t i = 0; i < this->MemAddrExprWidthsFromCallees[LoopNumPlusOne].size(); ++i) {
			if (this->NonStackFrameCalleeMemWrites[LoopNumPlusOne][i]) { // not just local stack frame write
				size_t MemWidth = this->MemAddrExprWidthsFromCallees[LoopNumPlusOne][i].second;
				STARSExpression *CalleeExpr = (*this->MemAddrExprWidthsFromCallees[LoopNumPlusOne][i].first);
				if (CalleeExpr->IsRelationalExpr()) {
					SMP_msg("INFO: SPARK: Saved MemExprFromCallees relational expr in EmitSPARKProcPrePostMemConditions for LoopPlusOne: %zu Func: %s\n",
						LoopNumPlusOne, this->GetFuncName());
					// We have already computed the upper bound taking into account the ByteWidth, so pass
					//  a ByteWidth of 0 here to avoid double-adding the ByteWidth in EmitSPARKMemRange().
					this->SplitAndSaveRelationalExpr(true, LoopNumPlusOne, 0, CalleeExpr);
					continue;
				}
				else {
					InsertResult = NonRangeExprs.insert(CalleeExpr);
					if (InsertResult.second) {
						// Unique new expr.
						pair<size_t, STARSExprSetIter> InsertValue(MemWidth, InsertResult.first);
						NonRangeExprWidthIters.push_back(InsertValue);
					}
				}
			}
		} // end for i in this->MemAddrExprWidthsFromCallees[LoopNumPlusOne]

		for (size_t i = 0; i < this->InArgsUsedInMemWriteByteWidths[LoopNumPlusOne].size(); ++i) {
			STARSExprSetIter InArgExprIter = this->InArgsUsedInMemWriteByteWidths[LoopNumPlusOne][i].first;
			STARSExpression *InArgExpr = (*InArgExprIter);
			// Handle memory ranges in the LoopMemWrites section above.
			size_t MemWidth = (size_t) this->InArgsUsedInMemWriteByteWidths[LoopNumPlusOne][i].second;
			if (InArgExpr->IsRelationalExpr()) {
				SMP_msg("INFO: SPARK: Saved InArg relational expr in EmitSPARKProcPrePostMemConditions for LoopPlusOne: %zu Func: %s\n", 
					LoopNumPlusOne, this->GetFuncName());
				// We have already computed the upper bound taking into account the ByteWidth, so pass
				//  a ByteWidth of 0 here to avoid double-adding the ByteWidth in EmitSPARKMemRange().
				this->SplitAndSaveRelationalExpr(true, LoopNumPlusOne, 0, InArgExpr);
				continue;
			}
			else {
				InsertResult = NonRangeExprs.insert((*this->InArgsUsedInMemWriteByteWidths[LoopNumPlusOne][i].first));
				if (InsertResult.second) {
					// Unique new expr.
					pair<size_t, STARSExprSetIter> InsertValue(MemWidth, InsertResult.first);
					NonRangeExprWidthIters.push_back(InsertValue);
				}
			}
		} // end for i in current LoopIndexPlusOne size()
	} // end for LoopNumPlusOne

	bool HasRangeExprs = (!RangeExprWidthIters.empty());
	bool HasNonRangeExprs = (!NonRangeExprWidthIters.empty());
	bool HasRelationalExprs = false;
	assert(IndexLimit == (1 + this->GetNumLoops()));
	for (size_t LoopNumPlusOne = 0; LoopNumPlusOne < IndexLimit; ++LoopNumPlusOne) {
		if (!this->RelationalMemWriteWidths[LoopNumPlusOne].empty()) {
			HasRelationalExprs = true;
			break;
		}
	}

	if (HasRangeExprs || HasNonRangeExprs || HasRelationalExprs) { // output is needed
		if (PreconditionSection)
			SMP_fprintf(HeaderFile, "\tPre => (");
		else
			SMP_fprintf(HeaderFile, "\tand \n\t(for all i in Unsigned64 => (if X86.InSafeRegion64(i, X86.RSP'Old)");

		if (HasRangeExprs || HasRelationalExprs) {
			if (PreconditionSection)
				SMP_fprintf(HeaderFile, "(for all i in Unsigned64 => (if X86.InMemoryRange(i, ");
		}

		if (HasRangeExprs) {
			size_t VectorLimit = RangeExprWidthIters.size();
			for (size_t VectorIndex = 0; VectorIndex < VectorLimit; ++VectorIndex) {
				STARSExpression *LowerExpr = (*RangeExprWidthIters[VectorIndex].first);
				STARSExpression *UpperExpr = (*RangeExprWidthIters[VectorIndex].second);
				this->EmitSPARKMemRange(HeaderFile, PreconditionSection, false, false, LowerExpr, UpperExpr, OutputCount, 0);
			}
		}

		if (HasRelationalExprs) {
			for (size_t LoopNumPlusOne = 0; LoopNumPlusOne < IndexLimit; ++LoopNumPlusOne) {
				std::list<std::pair<std::size_t, std::pair<STARSExprSetIter, STARSExprSetIter> > >::const_iterator RelationalIter;
				for (RelationalIter = this->RelationalMemWriteWidths[LoopNumPlusOne].cbegin(); RelationalIter != this->RelationalMemWriteWidths[LoopNumPlusOne].cend(); ++RelationalIter) {
					size_t MemWidth = (*RelationalIter).first;
					STARSExprSetIter LowerExprIter = (*RelationalIter).second.first;
					STARSExprSetIter UpperExprIter = (*RelationalIter).second.second;
					this->EmitSPARKMemRange(HeaderFile, PreconditionSection, false, false, (*LowerExprIter), (*UpperExprIter), OutputCount, MemWidth);
				}
			}
		}

		if (HasRangeExprs || HasRelationalExprs) {
			if (PreconditionSection)
				SMP_fprintf(HeaderFile, " then\n\t\tX86.InSafeRegion64(i, X86.RSP)))"); // terminate universal quantifier section
		}

		if (HasNonRangeExprs) {
			size_t VectorLimit = NonRangeExprWidthIters.size();
			for (size_t VectorIndex = 0; VectorIndex < VectorLimit; ++VectorIndex) {
				// Easier in this loop to use X86.InRange64(i, X86.RAX, 8) than to use X86.InMemoryRange(i, X86.RAX, X86.RAX+8)
				//  which would require printing the same operand twice.
				if (0 < OutputCount) {
					if (PreconditionSection)
						SMP_fprintf(HeaderFile, " and ");
					else
						SMP_fprintf(HeaderFile, "\n\t\t and not X86.InRange64(i, ");
				}
				else {
					assert(PreconditionSection);
				}
				size_t MemWidth = NonRangeExprWidthIters[VectorIndex].first;
				STARSExpression *NonRangeExpr = (*NonRangeExprWidthIters[VectorIndex].second);
				if (PreconditionSection) {
					NonRangeExpr->EmitSPARKInSafeRegion64(HeaderFile, MemWidth);
				}
				else {
					NonRangeExpr->EmitSPARKAda(HeaderFile, false, !PreconditionSection, false, false, false);
					// FIXED: "%zu" for size_t MemWidth; "%u" was a mismatched format on LP64.
					SMP_fprintf(HeaderFile, ", %zu)", MemWidth);
				}
				++OutputCount;
			} // end for VectorIndex over all NonRangeExprWidthIters
		} // end if (HasNonRangeExprs)

		if (PreconditionSection)
			SMP_fprintf(HeaderFile, "),\n"); // terminate precondition section
		else // terminate the postcondition section
			SMP_fprintf(HeaderFile, " then\n\t\t(X86.Memory(i) = X86.Memory'Old(i))));\n\n");
	} // end if (HasRangeExprs || HasNonRangeExprs)
	else if (!PreconditionSection) {
		// No mem-write clauses at all: close the Post section here.
		if (this->IsPossibleIndirectCallTarget() || this->AltersSPARKMemory()) {
			SMP_fprintf(HeaderFile, "\tand \n\t(for all i in Unsigned64 => (if X86.InSafeRegion64(i, X86.RSP'Old)");
			SMP_fprintf(HeaderFile, " then\n\t\t(X86.Memory(i) = X86.Memory'Old(i))));\n\n");
		}
		else {
			SMP_fprintf(HeaderFile, ";\n\n"); // terminate postcondition section
		}
	}

#if 0
	// Temporary fix: If the only exprs were relational and were skipped above, we need to
	//  close the precondition or postcondition section.
	if ((0 == OutputCount) && (HasRangeExprs || HasNonRangeExprs)) {
		if (PreconditionSection)
			SMP_fprintf(HeaderFile, "),\n"); // terminate precondition section
		else // terminate the postcondition section
			SMP_fprintf(HeaderFile, " then\n\t\t(X86.Memory(i) = X86.Memory'Old(i))));\n\n");
	}
#endif

	RangeExprWidthIters.clear();
	NonRangeExprWidthIters.clear();
	NonRangeExprs.clear();
	LowerBoundsExprs.clear();
	UpperBoundsExprs.clear();

	return;
} // end of SMPFunction::EmitSPARKProcPrePostMemConditions()

#endif

// Emit one SPARK memory-range term: "X86.InMemoryRange(i, <lower>, <upper>[ + width])".
// PreconditionSection selects the joining connective for terms after the first:
//  "or" in preconditions, "and not" in postconditions. ProcessingLoop and HasArgs are
//  forwarded to STARSExpression::EmitSPARKAda(); the third EmitSPARKAda() argument
//  (presumably the 'Old attribute suffix — confirm against STARSExpression) is enabled
//  only for postconditions outside loop translation. OutputCount counts emitted terms
//  so the first term gets no connective; MemWidth > 0 appends "+ width" to the upper bound.
void SMPFunction::EmitSPARKMemRange(FILE *HeaderFile, bool PreconditionSection, bool ProcessingLoop, bool HasArgs, STARSExpression *LowerExpr, STARSExpression *UpperExpr, size_t &OutputCount, size_t MemWidth) {
	// Every term after the first is joined to its predecessor.
	if (OutputCount > 0) {
		const char *Connective = PreconditionSection
			? "\n\t\t or X86.InMemoryRange(i, "
			: "\n\t\t and not X86.InMemoryRange(i, ";
		SMP_fprintf(HeaderFile, "%s", Connective);
	}
	const bool UseOldSuffix = !(ProcessingLoop || PreconditionSection);
	// Lower bound, comma, upper bound.
	LowerExpr->EmitSPARKAda(HeaderFile, ProcessingLoop, UseOldSuffix, false, HasArgs, false, false);
	SMP_fprintf(HeaderFile, ", ");
	UpperExpr->EmitSPARKAda(HeaderFile, ProcessingLoop, UseOldSuffix, false, HasArgs, false, false);
	// Close the term, widening the upper bound by MemWidth when one was supplied.
	if (MemWidth > 0) {
		SMP_fprintf(HeaderFile, " + %zu) ", MemWidth);
	}
	else {
		SMP_fprintf(HeaderFile, ") ");
	}
	++OutputCount;
} // end of SMPFunction::EmitSPARKMemRange()

// helper for EmitSPARKProcPrePostMemConditions()
// Emits one contract term per non-stack-frame callee memory write recorded at
//  index LoopIndexPlusOne (0 == writes reached from outside any loop). In the
//  precondition section each address expr is emitted via EmitSPARKInSafeRegion64();
//  in the postcondition section it becomes an "and not X86.InRange64(i, expr, width)"
//  exclusion. OutputCount tracks terms already emitted so the first term gets no
//  connective; it is incremented for each term emitted here.
// Fix in this revision: size_t MemWidth was printed with "%u" (undefined behavior
//  on LP64 targets where size_t is 64 bits); now printed with "%zu".
void SMPFunction::EmitSPARKPrePostMemConditionsFromCalleeNonLoopWrites(FILE *HeaderFile, bool PreconditionSection, size_t LoopIndexPlusOne, size_t &OutputCount) {
	for (size_t i = 0; i < this->MemAddrExprWidthsFromCallees[LoopIndexPlusOne].size(); ++i) {
		if (this->NonStackFrameCalleeMemWrites[LoopIndexPlusOne][i]) { // not just local stack frame write
			// Easier in this loop to use X86.InRange64(i, X86.RAX, 8) than to use X86.InMemoryRange(i, X86.RAX, X86.RAX+8)
			//  which would require printing the same operand twice.
			if (0 < OutputCount) {
				if (PreconditionSection)
					SMP_fprintf(HeaderFile, " and ");
				else
					SMP_fprintf(HeaderFile, "\n\t\t and not X86.InRange64(i, ");
			}
			else {
				// The postcondition path always has prior terms (e.g. the RSP clause),
				//  so a zero count can only occur in the precondition section.
				assert(PreconditionSection);
			}
			size_t MemWidth = this->MemAddrExprWidthsFromCallees[LoopIndexPlusOne][i].second;
			if (PreconditionSection) {
				(*this->MemAddrExprWidthsFromCallees[LoopIndexPlusOne][i].first)->EmitSPARKInSafeRegion64(HeaderFile, MemWidth);
			}
			else {
				(*this->MemAddrExprWidthsFromCallees[LoopIndexPlusOne][i].first)->EmitSPARKAda(HeaderFile, false, !PreconditionSection, false, false, false);
				SMP_fprintf(HeaderFile, ", %zu)", MemWidth); // FIXED: %zu for size_t (was %u)
			}
			++OutputCount;
		}
	} // end for all entries in MemAddrExprWidthFromCallees[LoopIndexPlusOne]
	return;
} // end of SMPFunction::EmitSPARKPrePostMemConditionsFromCalleeNonLoopWrites()

#if 0
// helper for EmitSPARKProcPrePostMemConditions()
// LEGACY VERSION: compiled out by the surrounding #if 0 / #endif; kept for reference.
// Emits contract terms for memory writes performed inside loops of callees.
void SMPFunction::EmitSPARKPrePostMemConditionsFromCalleeLoops(FILE *HeaderFile, bool PreconditionSection, std::size_t &OutputCount) {
	// Two cases. (1) From outside any loop, we call foo() and it has writes inside a loop.
	//  (2) From inside a loop, we call foo() and it has writes inside a loop.
	// Case (2) is more complex, as we need to analyze the worst-case range of mem addresses
	//  given that we have nested loops, albeit across procedure boundaries.
	// Case (1) is found in index 0 of vector this->LoopMemAddrExprsFromCalleeLoops[] while
	//  case (2) is found at index [LoopIndex + 1] in the same vector.
	size_t IndexLimit = this->LoopMemAddrExprWidthsFromCalleeLoops.size();
	for (size_t LoopNumPlusOne = 0; LoopNumPlusOne < IndexLimit; ++LoopNumPlusOne) {
		// NOTE(review): this outer i loop iterates once per list element, but the inner
		//  MemExprIter loop already walks the whole list each time, so every expr is
		//  emitted size() times — likely one reason this version was retired.
		for (size_t i = 0; i < this->LoopMemAddrExprWidthsFromCalleeLoops[LoopNumPlusOne].size(); ++i) { // case (1)
			// if (this->NonStackFrameCalleeMemWrites[0][i]) { // not just local stack frame write; NEED SOMETHING SIMILAR HERE
			// Easier in this loop to use X86.InRange64(i, X86.RAX, 8) than to use X86.InMemoryRange(i, X86.RAX, X86.RAX+8)
			//  which would require printing the same operand/expr twice.
			list<std::pair<STARSExprSetIter, size_t> >::const_iterator MemExprIter;
			for (MemExprIter = this->LoopMemAddrExprWidthsFromCalleeLoops[LoopNumPlusOne].cbegin(); MemExprIter != this->LoopMemAddrExprWidthsFromCalleeLoops[LoopNumPlusOne].cend(); ++MemExprIter) {
				pair<STARSExprSetIter, size_t> MemExprItem = (*MemExprIter);
				size_t MemWidth = MemExprItem.second;
				STARSExprSetIter ExprSetIter = MemExprItem.first;
				if ((*ExprSetIter)->IsRelationalExpr())
					continue; // relational exprs are handled elsewhere

				if (0 < OutputCount) {
					if (PreconditionSection)
						SMP_fprintf(HeaderFile, " and ");
					else
						SMP_fprintf(HeaderFile, "\n\t\t and not X86.InRange64(i, ");
					// NOTE(review): OutputCount is incremented here AND again below, so
					//  every term after the first bumps the count by 2 — looks like a bug;
					//  the active code path does a single increment per term.
					++OutputCount;
				}
				else {
					assert(PreconditionSection);
				}
				if (PreconditionSection) {
					(*ExprSetIter)->EmitSPARKInSafeRegion64(HeaderFile, MemWidth);
				}
				else {
					(*ExprSetIter)->EmitSPARKAda(HeaderFile, false, !PreconditionSection, false, false, false);
					// NOTE(review): MemWidth is size_t; "%u" is a mismatched format on LP64 —
					//  should be "%zu" if this legacy version is ever re-enabled.
					SMP_fprintf(HeaderFile, ", %u)", MemWidth);
				}
				++OutputCount;
			}
			// }
		} // end for all this->LoopMemAddrExprWidthsFromCalleeLoops[LoopNumPlusOne]
	} // end for all loop-plus-one indices

	return;
} // end of SMPFunction::EmitSPARKPrePostMemConditionsFromCalleeLoops()
#endif

// Determine if any mem writes (or callee mem writes) are not just to the stack frame locals.
//  Sets all four boolean reference args as a side effect, and records per-loop /
//  per-callee verdicts in NonStackFrameLoopMemWrites / NonStackFrameCalleeMemWrites.
//  Returns true if any category of memory write escapes the local stack frame,
//  i.e. SPARK preconditions are needed for this function.
bool SMPFunction::NeedsSPARKPreconditions(bool &HasLoopMemWrites, bool &HasNonLoopInArgMemWrites, bool &HasCalleeMemWrites, bool &HasCalleeLoopMemWrites) {
	// Optimistic first pass, based on previously computed per-function summaries.
	//  Index [0] of the vectors below holds the non-loop (whole function) entries;
	//  indices [1..LoopCount] hold per-loop entries.
	HasLoopMemWrites = ((this->LoopCount > 0) && this->MemRangeRegsBitmap.any());
	HasNonLoopInArgMemWrites = (!this->InArgsUsedInMemWrites[0].empty());
	HasCalleeMemWrites = (!this->MemAddrExprsFromCallees[0].empty());
	HasCalleeLoopMemWrites = this->HasMemExprsFromCalleeLoops;

	bool HasLoopGlobalStaticWrites = false;
#if 0  // now that we process multiple writes per loop, static write does not exclude stack writes
	if (!HasLoopMemWrites && (0 < this->LoopCount)) {
		// See if we have loop mem writes to global static memory.
		for (size_t LoopIndex = 0; LoopIndex < this->LoopCount; ++LoopIndex) {
			if (this->LoopWritesGlobalStaticMemory[LoopIndex]) {
				HasLoopGlobalStaticWrites = true;
				HasLoopMemWrites = true;
				this->NonStackFrameLoopMemWrites[LoopIndex] = true;
				break;
			}
		}
	}
#endif

	if (HasLoopMemWrites && (!HasLoopGlobalStaticWrites)) {
		// Apply more stringent criterion: Loop has to write outside of our stack frame.
		HasLoopMemWrites = false;
		for (size_t LoopIndex = 0; LoopIndex < this->LoopCount; ++LoopIndex) {
			STARSExprBoundsIter ExprIter;
			for (ExprIter = this->GetFirstLoopMemWriteExprBoundsIter(LoopIndex); ExprIter != this->GetLastLoopMemWriteExprBoundsIter(LoopIndex); ++ExprIter) {
				STARSExpression *LowerExpr = ExprIter->first;
				STARSExpression *UpperExpr = ExprIter->second;
				if (!IsLocalStackFrameExprPair(LowerExpr, UpperExpr)) {
					this->NonStackFrameLoopMemWrites[LoopIndex] = true;
					HasLoopMemWrites = true;
					break; // this loop needs preconditions already; go on to next loop
				}
			} // end for all loop bounds exprs
		}
	}

	if (!HasCalleeMemWrites) { // no callee mem writes outside of this function's loops
		// Check the loop-index entries for true votes.
		for (size_t LoopIndexOffsetByOne = 1; LoopIndexOffsetByOne <= this->LoopCount; ++LoopIndexOffsetByOne) {
			if (!this->MemAddrExprsFromCallees[LoopIndexOffsetByOne].empty()) {
				HasCalleeMemWrites = true;
				break;
			}
		}
	}

	if (HasCalleeMemWrites) {
		// Apply the more stringent criterion: only count callee writes whose address
		//  exprs cannot be proven to land in our stack frame locals.
		HasCalleeMemWrites = false;
		for (size_t LoopIndexOffsetByOne = 0; LoopIndexOffsetByOne <= this->LoopCount; ++LoopIndexOffsetByOne) {
			for (size_t index = 0; index < this->MemAddrExprWidthsFromCallees[LoopIndexOffsetByOne].size(); ++index) {
				STARS_sval_t FinalStackDelta = 0;
				if ((*(this->MemAddrExprWidthsFromCallees[LoopIndexOffsetByOne][index].first))->IsStackPtrOffset((STARS_sval_t) this->AllocPointDelta, FinalStackDelta)) {
					if (FinalStackDelta < 0) { // stack frame local var
						this->NonStackFrameCalleeMemWrites[LoopIndexOffsetByOne].push_back(false);
					}
					else {
						// Non-negative stack delta: not a stack frame local.
						this->NonStackFrameCalleeMemWrites[LoopIndexOffsetByOne].push_back(true);
						HasCalleeMemWrites = true;
					}
				}
				else {
					// Not traceable to a stack pointer offset; conservatively non-local.
					this->NonStackFrameCalleeMemWrites[LoopIndexOffsetByOne].push_back(true);
					HasCalleeMemWrites = true;
				}
			}
		}
	}

	return (HasLoopMemWrites || HasNonLoopInArgMemWrites || HasCalleeMemWrites || HasCalleeLoopMemWrites);
} // end of SMPFunction::NeedsSPARKPreconditions()

// Build the address-based suffix used to make SPARK identifiers unique to this
//  function, e.g. "_400586" for a function whose first address is 0x400586.
string SMPFunction::GetFuncSPARKSuffixString(void) const {
	// Buffer holds '_' + up to 16 hex digits (full 64-bit address) + NUL terminator.
	//  The previous "%x" conversion both mismatched a 64-bit vararg and truncated
	//  addresses above 32 bits, which could make two functions share a suffix.
	char HexAddr[20];
	SMP_snprintf(HexAddr, sizeof(HexAddr) - 1, "_%llx", (unsigned long long) this->GetFirstFuncAddr());
	string FuncAddrString(HexAddr);  // e.g. "_400586"
	return FuncAddrString;
}

// Save incoming args as locals to preserve their values
void SMPFunction::EmitSPARKSavedArgs(FILE *BodyFile) const {
	if ((0 < this->LoopCount) || (!this->InArgsUsedInMemWrites[0].empty())) {
		string FuncAddrString = this->GetFuncSPARKSuffixString();  // e.g. "_400586"
		std::bitset<1 + MD_LAST_REG_NO> EmittedRegs;
		if (0 < this->LoopCount) {
			EmittedRegs = this->MemRangeRegsBitmap;
			for (size_t i = 0; i < this->MemRangeRegsBitmap.size(); ++i) {
				if (this->MemRangeRegsBitmap[i]) {
					string CurrRegName(MDGetSPARKRegNumName((STARS_regnum_t)i, global_STARS_program->GetSTARS_ISA_Bytewidth()));
					string OutputName(CurrRegName + FuncAddrString);
#if 0
					// Need to only append "with Ghost" if the saved InArg will NOT be passed as a loop arg.
					SMP_fprintf(BodyFile, "\t%s : Unsigned64 := X86.%s with Ghost;\n", OutputName.c_str(), CurrRegName.c_str());
#else
					SMP_fprintf(BodyFile, "\t%s : Unsigned64 := X86.%s;\n", OutputName.c_str(), CurrRegName.c_str());
#endif
				}
			}
		}

		// Could also use this->LoopMemRangeInArgRegsBitMap for this next loop.
		for (size_t LoopPlusOne = 0; LoopPlusOne <= this->GetNumLoops(); ++LoopPlusOne) {
			size_t InArgLimit = this->GetNumInArgsUsedInMemWrites(LoopPlusOne);
			for (size_t ArgExprIndex = 0; ArgExprIndex < InArgLimit; ++ArgExprIndex) {
				STARSExprSetIter InArgExprIter = this->GetInArgExprUsedInMemWrite(LoopPlusOne, ArgExprIndex);
				STARS_regnum_t RegNum = (*InArgExprIter)->GetConstLeftOperand()->GetReg();
				// Don't emit an InArg save if we already did so for the InArgs used in loops above.
				if (!EmittedRegs[(size_t) RegNum]) {
					string CurrRegName(MDGetSPARKRegNumName(RegNum, global_STARS_program->GetSTARS_ISA_Bytewidth()));
					string OutputName(CurrRegName + FuncAddrString);
					SMP_fprintf(BodyFile, "\t%s : Unsigned64 := X86.%s with Ghost;\n", OutputName.c_str(), CurrRegName.c_str());
					EmittedRegs.set((size_t) RegNum);
				}
			}
		}
	}

	return;
} // end of SMPFunction::EmitSPARKSavedArgs()

// Emit loop function args, as Signature to HeaderFile or proc call to BodyFile.
//  Signature == true: emit Ada formal parameter declarations ("Name : in Unsigned64",
//  ';'-separated) to HeaderFile. Signature == false: emit the ','-separated actual
//  argument list to BodyFile. One arg is emitted per register set in this loop's
//  InArg register bitmap.
void SMPFunction::EmitSPARKArgs(FILE *BodyFile, FILE *HeaderFile, bool Signature, size_t LoopIndex) const {
	if (Signature)
		assert(NULL != HeaderFile);
	else
		assert(NULL != BodyFile);

	// Use the shared suffix helper instead of duplicating the snprintf code here,
	//  so that all SPARK identifier suffixes are generated consistently.
	string FuncAddrString = this->GetFuncSPARKSuffixString();  // e.g. "_400586"
	size_t OutputCount = 0;

	for (size_t i = 0; i < this->LoopMemRangeInArgRegsBitmap[LoopIndex].size(); ++i) {
		if (this->LoopMemRangeInArgRegsBitmap[LoopIndex][i]) {
			string CurrRegName(MDGetSPARKRegNumName((STARS_regnum_t) i, global_STARS_program->GetSTARS_ISA_Bytewidth()));
			string OutputName(CurrRegName + FuncAddrString);
			if (OutputCount == 0) {
				// First arg: no leading separator.
				if (Signature) {
					SMP_fprintf(HeaderFile, "%s : in Unsigned64", OutputName.c_str());
				}
				else {
					SMP_fprintf(BodyFile, "%s", OutputName.c_str());
				}
			}
			else {
				// Subsequent args: Ada signatures separate with ';', calls with ','.
				if (Signature) {
					SMP_fprintf(HeaderFile, "; %s : in Unsigned64", OutputName.c_str());
				}
				else {
					SMP_fprintf(BodyFile, ", %s", OutputName.c_str());
				}
			}
			++OutputCount;
		}
	}

	return;
} // end of SMPFunction::EmitSPARKArgs()

// Create SPARK procedure for loop starting at HeaderBlockNum.
//  Emits the procedure body opening (declarations, "begin", initial Assumes) to
//  BodyFile and the procedure specification (Global aspect, Pre aspect) to
//  HeaderFile. FollowBlockNum is the first block after the loop. Returns the
//  generated procedure name ("ZSTLoopProc_" + loop address in hex).
//  NOTE: post-conditions are emitted later, in SMPInstr::EmitSPARKAdaLoopInvariants().
string SMPFunction::EmitSPARKProcForLoopHeaderBlock(int LoopIndex, int HeaderBlockNum, int FollowBlockNum, FILE *BodyFile, FILE *HeaderFile) {
	assert(0 <= LoopIndex);
	size_t LoopNum = (size_t) LoopIndex;
	assert(0 <= HeaderBlockNum);
	size_t LoopNumPlusOne = LoopNum + 1; // per-loop vectors are indexed at LoopNum+1; [0] is the non-loop entry
	SMPBasicBlock *CurrBlock = this->GetBlockByNum(HeaderBlockNum);
	bool HasArgs = this->DoesLoopHaveArgs(LoopIndex);
	bool MemoryInput = false;
	bool MemoryOutput = false;
	bool UseFP = this->UsesFramePointer();
	STARS_ea_t LoopAddr = CurrBlock->GetFirstNonMarkerAddr();
	bitset<1 + MD_LAST_REG_NO> LoopInputRegs, LoopOutputRegs, CalleePreservedRegs;
	int FirstBlockNum = HeaderBlockNum;

	// See if we are translating a guarded loop.
	//  A guarded loop is detected when the block's first instruction has already
	//  been translated but its final conditional branch (BRANCH_IF_THEN) has not.
	STARS_ea_t LastAddr = CurrBlock->GetLastAddr();
	vector<SMPInstr *>::iterator InstIter = CurrBlock->GetFirstInst();
	SMPInstr *LastInst = this->GetInstFromAddr(LastAddr);
	if ((*InstIter)->IsMarkerInst())
		++InstIter;
	bool TranslatingGuardedLoop = ((*InstIter)->HasBeenTranslatedToSPARK() && (!LastInst->HasBeenTranslatedToSPARK())
		&& (BRANCH_IF_THEN == this->GetControlFlowType(LastAddr)));
	if (TranslatingGuardedLoop) {
		// Map the guard branch address to the real loop header address.
		map<STARS_ea_t, STARS_ea_t>::const_iterator GuardLoopIter = this->GuardToLoopMap.find(LastAddr);
		assert(GuardLoopIter != this->GuardToLoopMap.cend());
		LoopAddr = GuardLoopIter->second;
		// Presumably the guard's conditional branch consumes the flags -- TODO confirm.
		LoopInputRegs.set(MD_FLAGS_REG);
		// Skip on to next block.
		FirstBlockNum = (*(CurrBlock->GetFallThroughSucc()))->GetNumber();
	}

	// Generate procedure name for translated loop.
	std::string ProcName("ZSTLoopProc_");
	std::ostringstream AddrString;
	AddrString << std::hex << LoopAddr;
	ProcName += AddrString.str();

	bool HasNonStackPreconditions = this->LoopRequiresNonStackPreconditions(LoopNum);
	// Use sets of exprs to avoid duplication and streamline the output section.
	//  AggregateLoopMemExprs() fills this->TempRangeExprWidthIters and
	//  this->TempNonRangeExprWidthIters for this loop.
	this->AggregateLoopMemExprs(LoopNum);
	bool HasRangeExprs = (!this->TempRangeExprWidthIters.empty());
	bool HasNonRangeExprs = (!this->TempNonRangeExprWidthIters.empty());
	bool HasRelationalExprs = (!this->RelationalMemWriteWidths[LoopNumPlusOne].empty());
	bool HasMemExprs = (HasRangeExprs || HasNonRangeExprs || HasRelationalExprs);

	bool StackWritten = (this->NegativeOffsetStackBytesWrittenByLoop[LoopNum].IsAnyBitSet() || this->PositiveOffsetStackBytesWrittenByLoop[LoopNum].IsAnyBitSet());
	bool IndirectStackWrites = this->LoopMemExprsExpandToStackOffsets[LoopNum];
	bool UsesSaveStackPtr = (StackWritten || IndirectStackWrites || HasArgs || HasMemExprs);

	// Compute register/memory input-output sets for the loop body.
	this->AnalyzeLoopGlobals(FirstBlockNum, FollowBlockNum, LoopAddr, MemoryInput, MemoryOutput, LoopInputRegs, LoopOutputRegs, CalleePreservedRegs);
	MemoryOutput |= HasMemExprs;
	this->LoopWritesMemory[LoopNum] = MemoryOutput;
	this->OutputRegsByLoop[LoopNum] = LoopOutputRegs;
	this->CalleePreservedRegsByLoop[LoopNum] = CalleePreservedRegs;
	bool UsesStackPtrRegs = (LoopInputRegs[MD_STACK_POINTER_REG] || LoopOutputRegs[MD_STACK_POINTER_REG]
		|| (UseFP && (LoopInputRegs[MD_FRAME_POINTER_REG] || LoopOutputRegs[MD_FRAME_POINTER_REG])));
	if (!UsesStackPtrRegs && UsesSaveStackPtr) {
		// Stack pointer reg is only used in Ghost variable.
		LoopInputRegs.set(MD_STACK_POINTER_REG);
	}
	this->LoopUsesStackPtrRegs[LoopNum] = UsesStackPtrRegs;
	bool CalleeWrites = this->LoopHasCalleeMemWrites[LoopIndex];

	// Emit beginning of function body.
	if (HasArgs) {
		SMP_fprintf(BodyFile, "procedure %s(", ProcName.c_str());
		// Make EmitSPARKArgs write the signature to the BodyFile, not HeaderFile.
		this->EmitSPARKArgs(NULL, BodyFile, true, LoopNum);
		SMP_fprintf(BodyFile, ")\nis\n");
	}
	else {
		SMP_fprintf(BodyFile, "procedure %s\nis\n", ProcName.c_str());
	}
	// Keep track of the RSP and saved RBP from the original context of the loop.
	vector<SMPInstr *>::iterator FirstInstIter = CurrBlock->GetFirstInst();
	SMPInstr *FirstInst = (*FirstInstIter);
	STARS_sval_t IncomingStackDelta = FirstInst->GetStackPtrOffset();
	if (UsesStackPtrRegs || UsesSaveStackPtr) {
		// Declare Ghost variables capturing the incoming stack context, so the
		//  preconditions/invariants can refer to the caller's stack frame.
		if (0 != IncomingStackDelta)
			SMP_fprintf(BodyFile, "\tSaveStackPtr : Unsigned64 := X86.RSP + 16#%x# with Ghost;\n", (0 - IncomingStackDelta));
		else
			SMP_fprintf(BodyFile, "\tSaveStackPtr : Unsigned64 := X86.RSP with Ghost;\n");
		if (UseFP) {
			SMP_fprintf(BodyFile, "\tsavedRBP : Unsigned64 := X86.ReadMem64(X86.RBP) with Ghost;");
		}
	}
	SMP_fprintf(BodyFile, "\nbegin\n");
	if (UsesStackPtrRegs) {
		SMP_fprintf(BodyFile, "\tpragma Assume(X86.RSP = X86.DummyRSP);\n");
	}
	if ((this->LoopIterationsLimitExprs[LoopNum] != nullptr)
		&& (this->LoopAnalyzedBIVIters[LoopNum] != this->LoopInductionVars[LoopNum].end())) {
		// We analyzed the loop iterations count
		this->EmitSPARKLoopBIVLimits(BodyFile, LoopAddr, LoopNum, true); // pragma Assume(no wrap-around on loop limits)
	}

	// Emit procedure specification.
	this->EmitAnalysisProblemWarnings(HeaderFile, LoopIndex);
	if (HasArgs) {
		SMP_fprintf(HeaderFile, "procedure %s(", ProcName.c_str());
		this->EmitSPARKArgs(NULL, HeaderFile, true, LoopNum);
		SMP_fprintf(HeaderFile, ") with\n");
	}
	else {
		SMP_fprintf(HeaderFile, "procedure %s with\n", ProcName.c_str());
	}
	this->EmitSPARKLoopProcGlobals(BodyFile, HeaderFile, MemoryInput, MemoryOutput, LoopInputRegs, LoopOutputRegs, CalleePreservedRegs);

	// Emit pre-conditions.
	bool HasBIVInitialValue = (this->LoopAnalyzedBIVIters[LoopNum] != this->LoopInductionVars[LoopNum].end()); // successful loop analysis
	bool HasPreconditions = (HasNonStackPreconditions || (UseFP && UsesStackPtrRegs) || HasBIVInitialValue);
	this->LoopHasPreconditions[LoopNum] = HasPreconditions;
	
	if (HasPreconditions) {
		size_t OutputCount = 0;
		SMP_fprintf(HeaderFile, ",\n\tPre => ");
		if (UseFP && UsesStackPtrRegs) {
			// Relate the frame pointer to the stack pointer on loop entry.
			int RBPRSPDelta = (int)(this->FramePointerStackDelta - IncomingStackDelta);
			if (RBPRSPDelta == 0) {
				SMP_fprintf(HeaderFile, "(X86.RBP = X86.RSP)");
			}
			else {
				SMP_fprintf(HeaderFile, "(X86.RBP = X86.RSP + %d)", RBPRSPDelta);
			}
			if (HasNonStackPreconditions || HasBIVInitialValue) {
				SMP_fprintf(HeaderFile, " and \n\t"); // prepare for more preconditions
			}
			++OutputCount;
		}

		bitset<1 + MD_LAST_REG_NO> RegsPrinted; // record regs printed to avoid redundant output
		if (HasBIVInitialValue) { // Need precondition for basic induction var incoming value
			// NOTE: We could have more than one BIV. Need to expand.
			STARSInductionVarFamilyList::const_iterator BIVIter = this->LoopAnalyzedBIVIters[LoopNum];
			assert(BIVIter != this->LoopInductionVars[LoopNum].end());
			STARS_ea_t BIVInitAddr = BIVIter->BIVIncomingDefAddr;
			assert(STARS_BADADDR != BIVInitAddr);
			STARSOpndTypePtr BIVDefOp = BIVIter->BasicInductionVar.InductionVar.GetOp();
			// Emit "(BIV = init-value-expr)".
			SMP_fprintf(HeaderFile, " (");
			FirstInst->PrintSPARKAdaOperand(BIVDefOp, HeaderFile, false, UseFP, true, false, false);
			SMP_fprintf(HeaderFile, "= ");
			this->LoopIterationsInitExprs[LoopNum]->EmitSPARKAda(HeaderFile, true, false, false, HasArgs, false);
			++OutputCount;
			if (this->LoopIterationsInitExprs[LoopNum]->IsStackPtrRegUsed()) {
				// Offset the stack pointer from its entry point value in the func that includes this loop.
				SMP_fprintf(HeaderFile, " + 16#%x# ", (0 - IncomingStackDelta));
			}
			SMP_fprintf(HeaderFile, ")");
			if (BIVDefOp->IsRegOp()) {
				RegsPrinted.set((size_t) BIVDefOp->GetReg());
			}

			// See if we have a non-constant limit for the BIV. If so, print its value, 
			//  which will be found in the right hand side of the LimitExpr.
			assert(LoopIndex < (int)this->LoopComparisonExprs.size());
			STARSOpndTypePtr LimitOp = this->LoopComparisonExprs[LoopNum].Operand2.GetOp();
			if (!LimitOp->IsImmedOp()) {
				assert(this->LoopIterationsLimitExprs[LoopNum]->IsRelationalExpr());
				SMP_fprintf(HeaderFile, " and \n\t"); // prepare for LimitOp precondition
				SMP_fprintf(HeaderFile, " (");
				FirstInst->PrintSPARKAdaOperand(LimitOp, HeaderFile, false, UseFP, true, false, false);
				SMP_fprintf(HeaderFile, "= ");
				bool SPRelative = false;
				if (this->LoopIterationsLimitExprs[LoopNum]->HasRightSubTree()) {
					this->LoopIterationsLimitExprs[LoopNum]->GetRightTree()->EmitSPARKAda(HeaderFile, true, false, false, HasArgs, false);
					SPRelative = this->LoopIterationsLimitExprs[LoopNum]->GetRightTree()->IsStackPtrRegUsed();
				}
				else { // RightOperand
					const STARSOpndTypePtr LimitInitOp = this->LoopIterationsLimitExprs[LoopNum]->GetConstRightOperand();
					FirstInst->PrintSPARKAdaOperand(LimitInitOp, HeaderFile, false, UseFP, true, false, false);
					SPRelative = (LimitInitOp->IsRegOp() && LimitInitOp->MatchesReg(MD_STACK_POINTER_REG));
				}
				if (SPRelative) {
					// Offset the stack pointer from its entry point value in the func that includes this loop.
					SMP_fprintf(HeaderFile, " + 16#%x# ", (0 - IncomingStackDelta));
				}
				SMP_fprintf(HeaderFile, ")");
				// NOTE(review): this re-checks/re-sets BIVDefOp, not LimitOp; confirm
				//  whether RegsPrinted should record LimitOp's register here instead.
				if (BIVDefOp->IsRegOp()) {
					RegsPrinted.set((size_t) BIVDefOp->GetReg());
				}
			} // end if (!LimitOp->IsImmedOp())
		} // end if (HasBIVInitialValue)

		if (HasNonStackPreconditions) {
			if (HasArgs && HasMemExprs) { // Need preconditions for memory writing range
				// First, print the incoming values of memory address registers that were
				//  expanded to produce a PreLoopDefAddr in the expr.
				size_t VectorLimit = this->TempNonRangeExprWidthIters.size();
				for (size_t VecIndex = 0; VecIndex < VectorLimit; ++VecIndex) {
					STARSExpression *MemExpr = (*(this->TempNonRangeExprWidthIters[VecIndex].second));
					MemExpr->PrintSPARKArgLocationStrings(HeaderFile, false, LoopNum, OutputCount, RegsPrinted);
				}
				VectorLimit = this->TempRangeExprWidthIters.size();
				for (size_t VecIndex = 0; VecIndex < VectorLimit; ++VecIndex) {
					STARSExpression *MemExpr = (*(this->TempRangeExprWidthIters[VecIndex].first));
					MemExpr->PrintSPARKArgLocationStrings(HeaderFile, false, LoopNum, OutputCount, RegsPrinted);
					MemExpr = (*(this->TempRangeExprWidthIters[VecIndex].second));
					MemExpr->PrintSPARKArgLocationStrings(HeaderFile, false, LoopNum, OutputCount, RegsPrinted);
				}
			}
			// Print the incoming values of regs that got copied to address registers for mem writes.
			if (HasMemExprs && (!this->LoopRegSourceExprPairs[LoopNum].empty())) {
				this->EmitIncomingLoopRegExprs(HeaderFile, LoopNum, false);
				++OutputCount;
			}

			// Now, print the ranges of memory written (or inherited) in this loop.
			bool PreRangeOutput = (0 < OutputCount);
			bool RangesStarted = false;

			if (HasRangeExprs) {
				// Open the quantified "for all i" range clause; the closing
				//  "then ... InSafeRegion64" part is emitted after all ranges below.
				if (OutputCount == 0) {
					SMP_fprintf(HeaderFile, "\n\t\t(for all i in Unsigned64 => (if X86.InMemoryRange(i, ");
				}
				else {
					SMP_fprintf(HeaderFile, " and \n\t\t(for all i in Unsigned64 => (if X86.InMemoryRange(i, ");
				}
				RangesStarted = true;
				OutputCount = 0; // reset before first call to EmitSPARKMemRange()
				size_t VectorLimit = this->TempRangeExprWidthIters.size();
				for (size_t VecIndex = 0; VecIndex < VectorLimit; ++VecIndex) {
					STARSExpression *LowerExpr = (*this->TempRangeExprWidthIters[VecIndex].first);
					STARSExpression *UpperExpr = (*this->TempRangeExprWidthIters[VecIndex].second);
					this->EmitSPARKMemRange(HeaderFile, true, true, HasArgs, LowerExpr, UpperExpr, OutputCount, 0);
				}
			} // end if (HasRangeExprs)

			if (HasRelationalExprs) {
				if (!RangesStarted) {
					if (PreRangeOutput) // something printed after "Pre =>" and before mem ranges
						SMP_fprintf(HeaderFile, " and \n\t\t(for all i in Unsigned64 => (if X86.InMemoryRange(i, ");
					else
						SMP_fprintf(HeaderFile, "\n\t\t(for all i in Unsigned64 => (if X86.InMemoryRange(i, ");
					RangesStarted = true;
				}

				OutputCount = 0; // reset before first call to EmitSPARKMemRange()
				std::list<std::pair<std::size_t, std::pair<STARSExprSetIter, STARSExprSetIter> > >::const_iterator RelationalIter;
				for (RelationalIter = this->RelationalMemWriteWidths[LoopNumPlusOne].cbegin(); RelationalIter != this->RelationalMemWriteWidths[LoopNumPlusOne].cend(); ++RelationalIter) {
					size_t MemWidth = (*RelationalIter).first;
					STARSExprSetIter LowerExprIter = (*RelationalIter).second.first;
					STARSExprSetIter UpperExprIter = (*RelationalIter).second.second;
					this->EmitSPARKMemRange(HeaderFile, true, true, HasArgs, (*LowerExprIter), (*UpperExprIter), OutputCount, MemWidth);
				}
			} // end if (HasRelationalExprs)

			if (HasNonRangeExprs) {
				size_t VectorLimit = this->TempNonRangeExprWidthIters.size();
				for (size_t VectorIndex = 0; VectorIndex < VectorLimit; ++VectorIndex) {
					// Easier in this loop to use X86.InRange64(i, X86.RAX, 8) than to use X86.InMemoryRange(i, X86.RAX, X86.RAX+8)
					//  which would require printing the same operand twice.
					size_t ByteWidth = this->TempNonRangeExprWidthIters[VectorIndex].first;
					STARSExpression *NonRangeExpr = (*this->TempNonRangeExprWidthIters[VectorIndex].second);

					// The StackPtrCase below is now handled via the {Posi,Nega}tiveStackOffsetWritesByLoop[] vectors
					if (!NonRangeExpr->IsStackPtrPlusOffset()) { // screen out stack frame writes
						if (!RangesStarted) {
							if (PreRangeOutput) // something printed after "Pre =>" and before mem ranges
								SMP_fprintf(HeaderFile, " and \n\t\t(for all i in Unsigned64 => (if X86.InRange64(i, ");
							else
								SMP_fprintf(HeaderFile, "\n\t\t(for all i in Unsigned64 => (if X86.InRange64(i, ");
							RangesStarted = true;
						}
						else {
							SMP_fprintf(HeaderFile, "\n\t\tor X86.InRange64(i, ");
						}
						// Some MemAddrExprs were based on the incoming stack pointer value for the function enclosing
						//  this loop. We need to make two adjustments when printing these exprs:
						//   1. Don't treat the incoming stack ptr reg as an InArg (i.e. don't make it look like an Ada argument).
						//   2. Offset the stack address with the negated incoming stack offset.
						bool StackPtrCase = (!NonRangeExpr->HasLeftSubTree()) && NonRangeExpr->GetConstLeftOperand()->MatchesReg(STARS_x86_R_sp);
						STARS_ea_t PreLoopDefAddr = NonRangeExpr->GetLeftPreLoopDefAddr();
						bool PseudoAddr = STARS_IsSSAMarkerPseudoID(PreLoopDefAddr) || STARS_IsBlockNumPseudoID(PreLoopDefAddr) || STARS_IsLiveInPseudoID(PreLoopDefAddr);
						if (!StackPtrCase && (!PseudoAddr)) {
							// We don't want to trace callee mem exprs all the way back to this function's InArgs.
							//  We only want to trace back to the value that comes into the loop. The loop boundary
							//  crossing is recorded in the expression.
							STARSExpression *LoopBoundaryExpr = NonRangeExpr->Clone();
							LoopBoundaryExpr->SetLeftOperand(LoopBoundaryExpr->FindLeftPreLoopDefOp());
							LoopBoundaryExpr->SetLeftUseAddr(LoopBoundaryExpr->GetLeftPreLoopDefAddr());
							LoopBoundaryExpr->EmitSPARKAda(HeaderFile, !StackPtrCase, false, false, HasArgs, false);
						}
						else {
							NonRangeExpr->EmitSPARKAda(HeaderFile, !StackPtrCase, false, false, HasArgs, false);
							if ((0 != IncomingStackDelta) && StackPtrCase) {
								// Traced an outarg to a callee back to a stack pointer address. Offset by IncomingStackDelta.
								SMP_fprintf(HeaderFile, " + 16#%x#", IncomingStackDelta);
							}
						}
						SMP_fprintf(HeaderFile, ", %u) ", ByteWidth);
						++OutputCount;
					} // end if (!NonRangeExpr->IsStackPtrPlusOffset())
				} // end for VectorIndex over all NonRangeExprWidthIters
			} // end if (HasNonRangeExprs)

#if 0 // old code when we only emitted the loop mem range; part of TempRangeExprs now
			SMP_fprintf(HeaderFile, " and \n\t(for all i in Unsigned64 => (if X86.InMemoryRange(i, ");
			this->LoopMemWriteLowerBoundsExprs[LoopIndex]->EmitSPARKAda(HeaderFile, true, false, false, HasArgs, false);
			SMP_fprintf(HeaderFile, ", ");
			this->LoopMemWriteUpperBoundsExprs[LoopIndex]->EmitSPARKAda(HeaderFile, true, false, false, HasArgs, false);
			if (0 != IncomingStackDelta) {
				SMP_fprintf(HeaderFile, "then\n\t\tX86.InSafeRegion64(i, X86.RSP+%d)))", (0 - IncomingStackDelta));
			}
			else {
				SMP_fprintf(HeaderFile, "then\n\t\tX86.InSafeRegion64(i, X86.RSP)))");
			}
#endif			
			// Close the quantified range clause: any address in the written ranges
			//  must lie in the safe region. Callee writes reach one word below RSP.
			if (CalleeWrites) {
				SMP_fprintf(HeaderFile, "then\n\t\tX86.InSafeRegion64(i, X86.RSP-8)))");
			}
			else {
				SMP_fprintf(HeaderFile, "then\n\t\tX86.InSafeRegion64(i, X86.RSP)))");
			}
		}
		SMP_fprintf(HeaderFile, ",\n");  // terminate pre-condition section

	} // end if (HasPreconditions)

	// NOTE: The procedure post-conditions will be emitted in SMPInstr::EmitSPARKAdaLoopInvariants()

	return ProcName;
} // end of SMPFunction::EmitSPARKProcForLoopHeaderBlock()

// Decide whether the loop at LoopIndex needs SPARK preconditions that cannot be
//  discharged by stack-frame reasoning alone. Side effect: records in
//  LoopMemExprsExpandToStackOffsets[LoopIndex] whether ALL of the loop's mem-write
//  bounds expressions expand to simple stack-pointer-plus-offset form.
bool SMPFunction::LoopRequiresNonStackPreconditions(size_t LoopIndex) {
	bool LoopWritePreconditions = ((nullptr != this->LoopIterationsInitExprs[LoopIndex])
		&& (!this->LoopMemWriteBoundsExprs[LoopIndex].empty()));
	if (LoopWritePreconditions) {
		STARSExprBoundsIter ExprIter;
		bool TraceableToSimpleStackOffset = this->LoopIterationsInitExprs[LoopIndex]->IsStackPtrPlusOffset();
		// Loop exits early once TraceableToSimpleStackOffset goes false.
		for (ExprIter = this->GetFirstLoopMemWriteExprBoundsIter(LoopIndex); TraceableToSimpleStackOffset && ExprIter != this->GetLastLoopMemWriteExprBoundsIter(LoopIndex); ++ExprIter) {
			STARSExpression *LowerExpr = ExprIter->first;
			STARSExpression *UpperExpr = ExprIter->second;
			if ((nullptr != LowerExpr) && (nullptr != UpperExpr)) {
				// BUG FIX: was "TraceableToSimpleStackOffset || Lower... || Upper...",
				//  which is a tautology inside this loop (the flag is already true by
				//  the loop condition), so the scan could never clear the flag. Every
				//  bounds pair must expand to stack-ptr-plus-offset for the loop's
				//  writes to be provably confined to the stack frame.
				TraceableToSimpleStackOffset = (LowerExpr->IsStackPtrPlusOffset()
					&& UpperExpr->IsStackPtrPlusOffset());
			}
		}
		this->LoopMemExprsExpandToStackOffsets[LoopIndex] = TraceableToSimpleStackOffset;
		LoopWritePreconditions = (!TraceableToSimpleStackOffset);
	}
#if 1
	// If we don't already need preconditions based on loop mem writes, examine
	//  looping string mem writes, inherited from callees mem writes.
	// NOTE(review): the triple "&&" requires ALL THREE collections to be non-empty
	//  before preconditions are required; confirm "||" was not intended here.
	if (!LoopWritePreconditions) {
		LoopWritePreconditions = ((!this->MemAddrExprWidthsFromCallees[LoopIndex + 1].empty())
			&& (!this->LoopMemAddrExprWidthsFromCalleeLoops[LoopIndex + 1].empty())
			&& (!this->RelationalMemWriteWidths[LoopIndex + 1].empty()));
	}
#endif
	return LoopWritePreconditions;
} // end of SMPFunction::LoopRequiresNonStackPreconditions()

// Analyze reg and mem accesses in loop.
//  Computes, for the loop whose header contains LoopAddr, the up-exposed input
//  registers, the output (defined) registers, registers preserved by callees,
//  and whether memory is read/written. Results are returned through the five
//  reference parameters; InputRegs/OutputRegs/CalleePreservedRegs are
//  accumulated into (bits already set by the caller are kept).
//  HeaderBlockNum/FollowBlockNum delimit the region (FollowBlockNum currently
//  unused here; the block set comes from BuildLoopBlockList()).
void SMPFunction::AnalyzeLoopGlobals(int HeaderBlockNum, int FollowBlockNum, STARS_ea_t LoopAddr, bool &MemoryInput, bool &MemoryOutput, std::bitset<1 + MD_LAST_REG_NO> &InputRegs, std::bitset<1 + MD_LAST_REG_NO> &OutputRegs, std::bitset<1 + MD_LAST_REG_NO> &CalleePreservedRegs) {
	assert((0 <= HeaderBlockNum) && (HeaderBlockNum < (int)this->RPOBlocks.size()));
	MemoryInput = false;
	MemoryOutput = false;

	// We need something like the UpExposed set for HeaderBlockNum and the union of the VarKill sets
	//  of all blocks in the procedure or loop. But the extra USEs and DEFs that are added to CALL 
	//  and RETURN instructions are not desired for SPARK Ada analyses, so we must go through all the
	//  instructions and exclude CALL and RETURN types and build up the input and output data flow sets.

	SMPBasicBlock *LoopHeaderBlock = this->GetBlockFromInstAddr(LoopAddr);
	int TempLoopNum = this->GetLoopNumFromHeaderBlockNum(LoopHeaderBlock->GetNumber());
	assert(0 <= TempLoopNum);
	size_t LoopNum = (size_t) TempLoopNum;
	list<size_t> LoopBlockList;
	this->BuildLoopBlockList(LoopNum, LoopBlockList);

	for (list<size_t>::const_iterator BlockIter = LoopBlockList.cbegin(); BlockIter != LoopBlockList.cend(); ++BlockIter) {
		size_t BlockNum = *BlockIter;
		assert(BlockNum < this->RPOBlocks.size());
		SMPBasicBlock *CurrBlock = this->RPOBlocks[BlockNum];
		for (vector<SMPInstr *>::iterator InstIter = CurrBlock->GetFirstInst(); InstIter != CurrBlock->GetLastInst(); ++InstIter) {
			SMPInstr *CurrInst = (*InstIter);
			STARS_ea_t InstAddr = CurrInst->GetAddr();
			SMPitype FlowType = CurrInst->GetDataFlowType();
			if ((DEFAULT != FlowType) && (JUMP != FlowType) && (INDIR_JUMP != FlowType)) {
				if ((CALL == FlowType) || (INDIR_CALL == FlowType) || (RETURN == FlowType)) {
					// Calls change the stack pointer.
					InputRegs.set((size_t) MD_STACK_POINTER_REG);
					OutputRegs.set((size_t) MD_STACK_POINTER_REG);
					// Inherit from callee. NOTE: RETURN could be tail call, INDIR_CALL could be resolved.
					STARS_ea_t CalleeAddr = CurrInst->GetCallTarget();
					if (STARS_BADADDR != CalleeAddr) {
						SMPFunction *CalleeFunc = this->GetProg()->FindFunction(CalleeAddr);
						if (nullptr != CalleeFunc) {
							// Callee's inputs become our inputs; classify each of the
							//  callee's outputs as either preserved or truly altered.
							InputRegs |= CalleeFunc->GetInputRegs();
							std::bitset<1 + MD_LAST_REG_NO> TempCalleeOutputRegs = CalleeFunc->GetOutputRegs();
							std::bitset<1 + MD_LAST_REG_NO> TempCalleePreservedRegs = CalleeFunc->GetPreservedRegs();
							std::bitset<1 + MD_LAST_REG_NO> TempCalleeChainPreservedRegs = CalleeFunc->GetCalleePreservedRegs();
							for (size_t RegNo = 0; RegNo < TempCalleeOutputRegs.size(); ++RegNo) {
								if (TempCalleeOutputRegs[RegNo]) {
									// If RegNo is Preserved, record in CalleePreservedRegs argument.
									if (TempCalleePreservedRegs[RegNo]) {
										CalleePreservedRegs.set(RegNo);
									}
									else {
										// Altered, not preserved
										OutputRegs.set(RegNo);
									}
								}
								else if (TempCalleePreservedRegs[RegNo]) { // preserved in callee, not explicitly altered
									CalleePreservedRegs.set(RegNo);
								}
								else if (TempCalleeChainPreservedRegs[RegNo]) { // preserved in callee chain, not explicitly altered
									CalleePreservedRegs.set(RegNo);
								}
							}
							MemoryOutput |= CalleeFunc->AltersSPARKMemory();
						}
					}
				}
				continue;  // exclude CALL, INDIR_CALL, RETURN with their conservative USE and DEF lists
			}
			// Ordinary instruction: register USEs not yet defined in the traversal
			//  are up-exposed loop inputs; memory USEs mark memory as an input.
			for (STARSDefUseIter UseIter = CurrInst->GetFirstUse(); UseIter != CurrInst->GetLastUse(); ++UseIter) {
				STARSOpndTypePtr UseOp = UseIter->GetOp();
				if (UseOp->IsRegOp()) {
					STARS_regnum_t RegNo = UseOp->GetReg();
					if (!OutputRegs[(size_t) RegNo]) { // UpExposed, USE before DEF in RPO order traversal
						InputRegs.set((size_t) RegNo, true);
					}
				}
				else if (UseOp->IsMemOp()) {
					MemoryInput = true;
				}
			} // end for all USEs

			for (STARSDefUseIter DefIter = CurrInst->GetFirstDef(); DefIter != CurrInst->GetLastDef(); ++DefIter) {
				STARSOpndTypePtr DefOp = DefIter->GetOp();
				if (DefOp->IsRegOp()) {
					STARS_regnum_t RegNo = DefOp->GetReg();
					OutputRegs.set((size_t)RegNo, true);
				}
				else if (DefOp->IsMemOp()) {
					MemoryOutput = true;
				}
			} // end for all DEFs
		} // end for all insts in block
		if (!MemoryOutput && CurrBlock->HasMemoryWrite()) {
			SMP_msg("INFO: SPARK: Found new MemoryOutput using HasMemoryWrite\n");
			MemoryOutput = true;  // could be non-stack mem, not in DEFs
		}
	} // end for all blocks

	if (!OutputRegs[MD_STACK_POINTER_REG]) {
		// See if we have an implicit use of the stack pointer reg via loop mem write exprs that
		//  will expand to be stack pointer +/- offset.
		if (this->LoopMemExprsExpandToStackOffsets[LoopNum]) {
			InputRegs.set(MD_STACK_POINTER_REG);
			SMP_msg("INFO: SPARK: Loop %zu at %llx has SP reg use only implicitly.\n", LoopNum, (uint64_t) LoopAddr);
		}
	}
	LoopBlockList.clear();
	return;
} // end of SMPFunction::AnalyzeLoopGlobals()

// emit Input, Output, In_Out flow annotations
//  Writes the SPARK "Global => (...)" aspect to HeaderFile: an In_Out group for
//  registers that are written (or preserved by callees), memory, and flags; then
//  an Input group for registers/memory/flags that are only read. BodyFile is
//  currently unused but kept for interface consistency with the other emitters.
void SMPFunction::EmitSPARKLoopProcGlobals(FILE *BodyFile, FILE *HeaderFile, bool MemoryInput, bool MemoryOutput, const bitset<1 + MD_LAST_REG_NO> &InputRegs, const bitset<1 + MD_LAST_REG_NO> &OutputRegs, const bitset<1 + MD_LAST_REG_NO> &CalleePreservedRegs) {
	bool FlagsInput = false;
	bool FlagsOutput = false;
	bool UseFP = this->UsesFramePointer();

	// The flags register expands to the five individual X86 flag globals below.
	FlagsInput = InputRegs[MD_FLAGS_REG];
	FlagsOutput = OutputRegs[MD_FLAGS_REG] || CalleePreservedRegs[MD_FLAGS_REG];

	// If a variable is Input but not Output, it gets the SPARK Ada keyword "Input"
	size_t ByteWidth = global_STARS_program->GetSTARS_ISA_Bytewidth();
	SMP_fprintf(HeaderFile, "\tGlobal => (");
	size_t TotalOutputCount = 0; // entries emitted in previously-closed groups
	size_t OutputCount = 0;      // entries emitted in the currently-open group
	bool FPStackBelowTopPrinted = false;

	// In_Out group: registers written in the loop or preserved by callees.
	for (size_t OutputIndex = 0; OutputIndex < OutputRegs.size(); ++OutputIndex) {
		if (OutputIndex == MD_FLAGS_REG)
			continue;
		// st1..st7 all map to one dummy global; emit it at most once.
		bool FPRegBelowStackTop = ((OutputIndex >= STARS_x86_R_st1) && (OutputIndex <= STARS_x86_R_st7));
		if (!(FPRegBelowStackTop && FPStackBelowTopPrinted)) { // avoid duplicate X86.FloatingPointStackDummy1
			if (OutputRegs[OutputIndex] || CalleePreservedRegs[OutputIndex]) {
				if (0 == OutputCount) {
					if (0 < TotalOutputCount) {
						SMP_fprintf(HeaderFile, ",\n\t\t");  // line up with previous Global lines
					}
					SMP_fprintf(HeaderFile, "In_Out => (X86.%s", MDGetSPARKRegNumName((STARS_regnum_t)OutputIndex, (uint16_t)ByteWidth));
				}
				else {
					SMP_fprintf(HeaderFile, ", X86.%s", MDGetSPARKRegNumName((STARS_regnum_t)OutputIndex, (uint16_t)ByteWidth));
				}
				++OutputCount;
				FPStackBelowTopPrinted |= FPRegBelowStackTop;
			}
		}
	} // for all OutputRegs
	if (MemoryOutput) {
		if (0 == OutputCount) {
			SMP_fprintf(HeaderFile, "In_Out => (X86.Memory");
		}
		else {
			SMP_fprintf(HeaderFile, ", X86.Memory");
		}
		++OutputCount;
	}
	if (FlagsOutput) {
		if (0 == OutputCount) {
			if (0 < TotalOutputCount) {
				SMP_fprintf(HeaderFile, ",\n\t\t");  // line up with previous Global lines
			}
			SMP_fprintf(HeaderFile, "In_Out => (X86.CarryFlag, X86.ParityFlag, X86.SignFlag, X86.OverflowFlag, X86.ZeroFlag");
		}
		else {
			SMP_fprintf(HeaderFile, ", X86.CarryFlag, X86.ParityFlag, X86.SignFlag, X86.OverflowFlag, X86.ZeroFlag");
		}
		++OutputCount;
	}
	if (0 < OutputCount) {
		SMP_fprintf(HeaderFile, ")"); // close the In_Out open parenthesis
	}
	TotalOutputCount += OutputCount;

	// Input group: read-only registers (not written, not callee-preserved).
	OutputCount = 0;
	for (size_t InputIndex = 0; InputIndex < InputRegs.size(); ++InputIndex) {
		if (InputIndex == MD_FLAGS_REG)
			continue;
		if (InputRegs[InputIndex] && (!OutputRegs[InputIndex]) && (!CalleePreservedRegs[InputIndex])) {
			bool FPRegBelowStackTop = ((InputIndex >= STARS_x86_R_st1) && (InputIndex <= STARS_x86_R_st7));
			if (!(FPRegBelowStackTop && FPStackBelowTopPrinted)) { // avoid duplicate X86.FloatingPointStackDummy1
				if (0 == OutputCount) {
					if (0 < TotalOutputCount) {
						SMP_fprintf(HeaderFile, ",\n\t\t");  // line up with previous Global lines
					}
					SMP_fprintf(HeaderFile, "Input => (X86.%s", MDGetSPARKRegNumName((STARS_regnum_t)InputIndex, (uint16_t)ByteWidth));
				}
				else {
					SMP_fprintf(HeaderFile, ", X86.%s", MDGetSPARKRegNumName((STARS_regnum_t)InputIndex, (uint16_t)ByteWidth));
				}
				++OutputCount;
				FPStackBelowTopPrinted |= FPRegBelowStackTop;
			}
		}
	} // end for all InputRegs
	if (MemoryInput && (!MemoryOutput)) {
		if (0 == OutputCount) {
			if (0 < TotalOutputCount) {
				SMP_fprintf(HeaderFile, ",\n\t\t");  // line up with previous Global lines
			}
			SMP_fprintf(HeaderFile, "Input => (X86.Memory");
		}
		else {
			SMP_fprintf(HeaderFile, ", X86.Memory");
		}
		++OutputCount;
	}
	if (FlagsInput && (!FlagsOutput)) {
		if (0 == OutputCount) {
			// BUG FIX: this branch previously omitted the separator check that every
			//  other group-opening branch performs, producing "...)Input => (" with
			//  no comma when an In_Out group was already emitted and the flags are
			//  the first Input entry.
			if (0 < TotalOutputCount) {
				SMP_fprintf(HeaderFile, ",\n\t\t");  // line up with previous Global lines
			}
			SMP_fprintf(HeaderFile, "Input => (X86.CarryFlag, X86.ParityFlag, X86.SignFlag, X86.OverflowFlag, X86.ZeroFlag");
		}
		else {
			SMP_fprintf(HeaderFile, ", X86.CarryFlag, X86.ParityFlag, X86.SignFlag, X86.OverflowFlag, X86.ZeroFlag");
		}
		++OutputCount;
	}
	if (0 < OutputCount) {
		SMP_fprintf(HeaderFile, ")"); // close the Input open parenthesis
	}
	TotalOutputCount += OutputCount;

	SMP_fprintf(HeaderFile, ")"); // close the Global open parenthesis

	return;
} // end of SMPFunction::EmitSPARKLoopProcGlobals()

// Recursive descent translation to SPARK Ada starting with CurrBlock, stop before Follow Block.
//
// Parameters:
//  CurrBlockNum:  block number of the first block to translate; the loop terminates when it
//                 goes negative or reaches FollowBlockNum.
//  FollowBlockNum: block number at which translation stops; the follow block itself is NOT
//                 emitted here (the caller resumes with it).
//  SPARKBodyFile: output file receiving the generated SPARK Ada text.
//  ReadytoEmitSwitchDefault: true when the caller (EmitSPARKAdaForSwitch()) is emitting the
//                 default case last; false means a switch-default block encountered here
//                 triggers translation of the whole switch statement instead.
//  LoopToProc:    true when loop bodies are translated in place (recursing into
//                 EmitSPARKAdaForLoop()); false causes loop headers to be emitted as calls
//                 to loop procedures that are generated later from SPARKLoopWorkList.
void SMPFunction::EmitSPARKAdaForBlock(int CurrBlockNum, int FollowBlockNum, FILE *SPARKBodyFile, bool ReadytoEmitSwitchDefault, bool LoopToProc) {
	if (LoopToProc && (!this->IsSPARKLoopInTranslationStack())) {
		// See if we are beginning to translate a guarded loop.
		SMPBasicBlock *CurrBlock = this->GetBlockByNum(CurrBlockNum);
		STARS_ea_t LastAddr = CurrBlock->GetLastAddr();
		STARS_ea_t FirstAddr = CurrBlock->GetFirstNonMarkerAddr();
		ControlFlowType LastCFType = this->GetControlFlowType(LastAddr);
		SMPInstr *FirstInst = this->GetInstFromAddr(FirstAddr);
		SMPInstr *LastInst = this->GetInstFromAddr(LastAddr);
		// Guard detected when the block body is already translated but its trailing
		//  BRANCH_IF_THEN is not: only the conditional remains to be emitted.
		if ((LastCFType == BRANCH_IF_THEN) && FirstInst->HasBeenTranslatedToSPARK() && (!LastInst->HasBeenTranslatedToSPARK())) {
			// We are beginning to translate a guarded loop. Only the COND_BRANCH statement
			//  at the end of CurrBlockNum needs to be translated, via EmitSPARKAdaForConditional().
			//  The rest of the block has been translated.
			//  After the recursive descent into EmitSPARKAdaForConditional(), we are done with
			//  this guarded loop.
			this->EmitSPARKAdaForConditional(CurrBlockNum, FollowBlockNum, SPARKBodyFile);
			return;
		}
	}

	// Recursive descent based on the kind of block CurrBlock is.
	while ((CurrBlockNum != FollowBlockNum) && (CurrBlockNum >= 0)) {
		SMPBasicBlock *CurrBlock = this->GetBlockByNum(CurrBlockNum);
		if (CurrBlock->IsProcessed())
			break;

		// ResumeBlockNum is where we continue after handling CurrBlock; default is to stop.
		int ResumeBlockNum = FollowBlockNum;
		if (CurrBlock->IsLoopHeaderBlock()) {  // !!!!****!!!! Fix for funcs that start with a loop header
			int LoopNum = this->GetLoopNumFromHeaderBlockNum(CurrBlockNum);
			assert(0 <= LoopNum);
			STARS_ea_t LoopAddr = CurrBlock->GetFirstNonMarkerAddr();
			bool TranslatingGuardedLoop = (this->LoopToGuardMap.find(LoopAddr) != this->LoopToGuardMap.cend());
			int NextFollowBlockNum = this->LoopFollowNodes[LoopNum];
			assert(SMP_BLOCKNUM_UNINIT != NextFollowBlockNum);
			if (LoopToProc) {
				assert(TranslatingGuardedLoop || (ResumeBlockNum == NextFollowBlockNum)); // should be passed in as loop follow block number
				this->EmitSPARKAdaForLoop(CurrBlockNum, NextFollowBlockNum, SPARKBodyFile); // recurse and resume
			}
			else { // !LoopToProc case
				// Generate procedure name for the loop being converted into a procedure.
				// If loop starts at 0x8048ca0, we want ZSTLoopProc_8048ca0 as the proc name.
				this->EmitSPARKAdaLoopCall(LoopAddr, (size_t) LoopNum, SPARKBodyFile);
				ResumeBlockNum = NextFollowBlockNum;
				// Record (loop number, (header block, resume block)) so the loop body can be
				//  translated into its own procedure later.
				pair<int, int> BlockItem(CurrBlockNum, ResumeBlockNum);
				pair<int, pair<int, int> > WorkListItem(LoopNum, BlockItem);
				this->SPARKLoopWorkList.push_back(WorkListItem);
			}
		}
		else if (CurrBlock->IsLoopTailBlock()) {
			// assert(false);
			// We are at the end of a recursive descent into a loop. Translate the block and return to EmitSPARKAdaForLoop().
			if (CurrBlock->HasLoopHeadWithInvertedExitAsSuccessor()) {
				// Last inst in block is transfer to loop head. Need to ensure that we don't
				//  emit "end loop;" before we return to our caller and emit "end if;" in order
				//  to get proper nesting of control structures. 
				ControlFlowType LastCFType = CurrBlock->GetLastInstCFType();
				vector<SMPInstr *>::const_reverse_iterator LastInstIter = CurrBlock->GetRevInstCBegin();
				SMPInstr *LastInst = (*LastInstIter);
				SMPitype FlowType = LastInst->GetDataFlowType();
				if (JUMP == FlowType) {
					// One branch of an if-then-else can fall through to the inverted-exit loop head.
					//  The other branch jumps to the loop header.
					CurrBlock->EmitSPARKAdaForFallThroughInsts(SPARKBodyFile);
					// Consult the SPARK control-structure stack to decide which closers to emit.
					SPARKTranslationCFType CurrentCFType = this->SPARKControlStack.back();
					if (SPARK_LOOP == CurrentCFType) {
						// Must be called from EmitSPARKAdaForLoop(); time for "end loop;" without "end if;"
						--STARS_SPARK_IndentCount;
						PrintSPARKIndentTabs(SPARKBodyFile);
						SMP_fprintf(SPARKBodyFile, "end loop;\n\n");
						LastInst->SetSPARKTranslated();
					}
					else if (SPARK_ELSE_CLAUSE == CurrentCFType) {
						// Must be called from EmitSPARKAdaForConditional(); time for "end if;" and then "end loop;"
						--STARS_SPARK_IndentCount;
						PrintSPARKIndentTabs(SPARKBodyFile);
						SMP_fprintf(SPARKBodyFile, "end if;\n\n");
						--STARS_SPARK_IndentCount;
						PrintSPARKIndentTabs(SPARKBodyFile);
						SMP_fprintf(SPARKBodyFile, "end loop;\n\n");
						LastInst->SetSPARKTranslated();
					}
					else { // mysterious
						LastInst->EmitSPARKAda(SPARKBodyFile);
					}
				}
				else {
					CurrBlock->EmitSPARKAdaForAllInsts(SPARKBodyFile);
				}
			}
			else {
				CurrBlock->EmitSPARKAdaForAllInsts(SPARKBodyFile);
			}
			return;
		}
		else if (CurrBlock->IsSwitchDefaultCase() && (!ReadytoEmitSwitchDefault)) {
			// We can encounter the default case before the INDIR_JUMP for a switch.
			//  EmitSPARKAdaForSwitch() will emit the default case last.
			int NextFollowBlockNum = this->FindFollowBlockNum(CurrBlock, false);
			this->EmitSPARKAdaForSwitch(CurrBlockNum, NextFollowBlockNum, SPARKBodyFile);
			ResumeBlockNum = NextFollowBlockNum;
		}
		else { // Not a special block, or it is a ReadytoEmitSwitchDefault block.
			// Translate instructions in current block, then see if block terminating inst
			//  tells us what control structure we are entering, e.g. loop or switch-case.
			CurrBlock->EmitSPARKAdaForFallThroughInsts(SPARKBodyFile); // all except last inst if it is control flow terminator for block
			vector<SMPInstr *>::reverse_iterator LastInstIter = CurrBlock->GetRevInstBegin();
			SMPInstr *LastInst = (*LastInstIter);
			SMPitype FlowType = LastInst->GetDataFlowType();
			if (FlowType != DEFAULT) {
				STARS_ea_t LastAddr = LastInst->GetAddr();
				ControlFlowType LastCFType = this->GetControlFlowType(LastAddr);
				if (FlowType == INDIR_JUMP) {
					// Translate switch-case statement.
					int NextFollowBlockNum = this->FindFollowBlockNum(CurrBlock, true);
					this->EmitSPARKAdaForSwitch(CurrBlockNum, NextFollowBlockNum, SPARKBodyFile);
					ResumeBlockNum = NextFollowBlockNum;
					LastInst->SetSPARKTranslated();
				}
				else if (FlowType == COND_BRANCH) {
					if (LastCFType == JUMP_TO_SWITCH_INDIR_JUMP) {
						// conditional branch to INDIR_JUMP is not well-structured; should not occur
						SMP_msg("FATAL ERROR: COND_BRANCH of type JUMP_TO_SWITCH_INDIR_JUMP at %llx\n",
							(unsigned long long) LastAddr);
						assert(LastCFType != JUMP_TO_SWITCH_INDIR_JUMP);
					}
					else if (LastCFType == JUMP_TO_DEFAULT_CASE) {  
						// jump to default case is not translated directly
						// We do not follow the branch to the default case; that case will be translated when the 
						//  switch statement is translated. Instead, we go to the fall-through block. We need to
						//  translate all of the non-branching instructions that lead up to the switch statement in
						//  order to get the switch index value computed properly, e.g:
						//  cmp eax,8        ; translate, but we don't really need it in most cases
						//  jb default_case  ; no translation
						//  sub eax,8        ; need this translation
						//  cmp eax,12       ; translate, don't need it in most cases
						//  ja default_case  ; don't translate
						//  jmp [memaddr + eax*4]  ; translate to "case X86.EAX is"
						list<SMPBasicBlock *>::const_iterator FallThroughIter = CurrBlock->GetFallThroughSucc();
						assert(FallThroughIter != CurrBlock->GetLastConstSucc()); // COND_BRANCH always has a fall-through
						ResumeBlockNum = (*FallThroughIter)->GetNumber();
						LastInst->SetSPARKTranslated();
					}
					else if (IsLoopExitFlow(LastCFType)) {
						// Middle exit, else it would be loop header or tail handled above.
						LastInst->EmitSPARKAda(SPARKBodyFile); // emit exit when (condition)
					}
					else {
						assert((LastCFType == BRANCH_IF_THEN) || (LastCFType == BRANCH_IF_THEN_ELSE) || (LastCFType == SHORT_CIRCUIT_BRANCH));
						// Translate if-else structure previously identified
						int NextFollowBlockNum = this->FindFollowBlockNum(CurrBlock, true);
						this->EmitSPARKAdaForConditional(CurrBlockNum, NextFollowBlockNum, SPARKBodyFile);
						ResumeBlockNum = NextFollowBlockNum;
					}
				}
				else if (FlowType == JUMP) {
					// Get control flow type of the jump
					assert(1 == CurrBlock->GetNumSuccessors());
					if (JUMP_INTO_LOOP_TEST == LastCFType) {
						// Optimized top-testing loop becomes a bottom-testing loop with a jump to the loop header/loop-test
						//  block for the first iteration. We translate as a top-testing loop in Ada by starting with the 
						//  target block of the jump.
						SMPBasicBlock *NextBlock = (*(CurrBlock->GetFirstSucc()));
						assert(NextBlock->IsLoopHeaderBlock());
						int NextBlockNum = NextBlock->GetNumber();
						int LoopNum = this->GetLoopNumFromHeaderBlockNum(NextBlockNum);
						assert(0 <= LoopNum);
						int NextFollowBlockNum = this->LoopFollowNodes[LoopNum];
						assert(SMP_BLOCKNUM_UNINIT != NextFollowBlockNum);
						if (LoopToProc) {
							this->EmitSPARKAdaForLoop(NextBlockNum, NextFollowBlockNum, SPARKBodyFile); // recurse and return
							ResumeBlockNum = NextFollowBlockNum;
							LastInst->SetSPARKTranslated();
						}
						else { // !LoopToProc case
							// Generate procedure name for the loop being converted into a procedure.
							// If loop starts at 0x8048ca0, we want ZSTLoopProc_8048ca0 as the proc name.
							STARS_ea_t LoopAddr = NextBlock->GetFirstAddr();
							this->EmitSPARKAdaLoopCall(LoopAddr, (size_t) LoopNum, SPARKBodyFile);
							ResumeBlockNum = NextFollowBlockNum;
							LastInst->SetSPARKTranslated();
							pair<int, int> BlockItem(NextBlockNum, ResumeBlockNum);
							pair<int, pair<int, int> > WorkListItem(LoopNum, BlockItem);
							this->SPARKLoopWorkList.push_back(WorkListItem);
						}
					}
					else if ((JUMP_TO_DEFAULT_CASE == LastCFType) || (JUMP_TO_SWITCH_INDIR_JUMP == LastCFType)) {
						; // jump to default case is not translated directly; ditto for jump around default case to INDIR_JUMP
						LastInst->SetSPARKTranslated();
					}
					else {
						// All other cases should be handled in SMPInstr::EmitSPARKAda().
						LastInst->EmitSPARKAda(SPARKBodyFile);
						// Handle special case in which a JUMP just goes to the lone successor,
						//  which is equivalent to falling through.
						// if ((FALL_THROUGH == LastCFType) && (ResumeBlockNum < 0)) {
						if (FALL_THROUGH == LastCFType) {
							ResumeBlockNum = (*(CurrBlock->GetFirstConstSucc()))->GetNumber();
						}
					}
				}
				else if (FlowType == RETURN) {
					LastInst->EmitSPARKAda(SPARKBodyFile);
					// Handle possible tail call case
					;
				}
				else if (FlowType == CALL) {
					LastInst->EmitSPARKAda(SPARKBodyFile);
					// NOTE(review): outer test already ensures LastCFType == FALL_THROUGH here,
					//  so the nested re-test is redundant but harmless; kept as-is.
					if (LastCFType == FALL_THROUGH) {
						// if ((FALL_THROUGH == LastCFType) && (ResumeBlockNum < 0)) {
						if (FALL_THROUGH == LastCFType) {
							list<SMPBasicBlock *>::const_iterator SuccIter = CurrBlock->GetFirstConstSucc();
							if (SuccIter != CurrBlock->GetLastConstSucc()) {
								ResumeBlockNum = (*SuccIter)->GetNumber();
							}
						}
					}
				}
			}
			else {
				// Last inst was translated by SMPBasicBlock::EmitSPARKAdaForFallThroughInsts().
				//  CurrBlock can only have one successor, a fall-through block.
				// NOTE: If CurrBlock ends with a HALT or a call to a NORET function, we can have no successors.
				size_t NumSuccessors = CurrBlock->GetNumSuccessors();
				if (1 == NumSuccessors) {
					SMPBasicBlock *FallThroughSucc = (*(CurrBlock->GetFirstSucc()));
					assert(NULL != FallThroughSucc);
					ResumeBlockNum = FallThroughSucc->GetNumber();
				}
				else {
					assert(((FlowType == CALL) || (FlowType == HALT)) && (0 == NumSuccessors));
				}
			}
		} // end if header block ... elsif tail block ... else ...
		CurrBlockNum = ResumeBlockNum;
	} // end while good CurrBlockNum
	return;
} // end of SMPFunction::EmitSPARKAdaForBlock()

// Recursive descent translation of loop to SPARK Ada starting with header CurrBlock, stop before Follow Block.
//
// Parameters:
//  HeaderBlockNum: block number of the loop header; translation starts here.
//  FollowBlockNum: block number at which translation stops (the loop follow node; not emitted here).
//  SPARKBodyFile:  output file receiving the generated SPARK Ada text.
//
// Pushes SPARK_LOOP onto SPARKControlStack for the duration of the translation so that
//  nested calls (e.g. loop tail handling in EmitSPARKAdaForBlock()) can emit the correct
//  "end loop;"/"end if;" nesting.
void SMPFunction::EmitSPARKAdaForLoop(int HeaderBlockNum, int FollowBlockNum, FILE *SPARKBodyFile) {
	if (HeaderBlockNum == FollowBlockNum)
		return;  // end recursion if Follow Block is seen

	int CurrBlockNum = HeaderBlockNum;
	int ResumeBlockNum = FollowBlockNum;
	this->SPARKControlStack.push_back(SPARK_LOOP);

	while ((CurrBlockNum != FollowBlockNum) && (CurrBlockNum >= 0)) {
		assert((0 <= CurrBlockNum) && (CurrBlockNum < (int)this->RPOBlocks.size()));
		SMPBasicBlock *CurrBlock = this->GetBlockByNum(CurrBlockNum);
		if (CurrBlock->IsProcessed())
			break;

		// True only for the header of THIS loop on the first iteration; a different
		//  header block reached later means we fell into an inner (nested) loop.
		bool StartingLoop = (CurrBlock->IsLoopHeaderBlock() && (CurrBlockNum == HeaderBlockNum));

		// Recursive descent based on the kind of block CurrBlock is.
		if (CurrBlock->IsLoopHeaderBlock() && (!StartingLoop)) {
			// We have fallen through to an inner loop. Emit a call to the loop proc created later, and jump past inner loop.
			int LoopNum = this->GetLoopNumFromHeaderBlockNum(CurrBlockNum);
			assert(0 <= LoopNum);
			int NextFollowBlockNum = this->LoopFollowNodes[LoopNum];
			assert(SMP_BLOCKNUM_UNINIT != NextFollowBlockNum);
			this->EmitSPARKAdaLoopCall(CurrBlock->GetFirstAddr(), (size_t) LoopNum, SPARKBodyFile); 
			ResumeBlockNum = NextFollowBlockNum;
			// Queue the inner loop for later translation into its own procedure.
			pair<int, int> BlockItem(CurrBlockNum, ResumeBlockNum);
			pair<int, pair<int, int> > WorkListItem(LoopNum, BlockItem);
			this->SPARKLoopWorkList.push_back(WorkListItem);
		}
		else if (CurrBlock->IsLoopTailBlock()) {
			// We are at the end of a recursive descent into a loop. Translate the block and return.
			// NOTE: What about one-block loops, where block is header block and tail block?   !!!!****!!!!
			CurrBlock->EmitSPARKAdaForAllInsts(SPARKBodyFile);
			break;
		}
		else if (CurrBlock->IsSwitchDefaultCase()) {
			// Translate switch-case statement.
			int NextFollowBlockNum = this->FindFollowBlockNum(CurrBlock, false);
			this->EmitSPARKAdaForSwitch(CurrBlockNum, NextFollowBlockNum, SPARKBodyFile);
			ResumeBlockNum = NextFollowBlockNum;
		}
		else {
			// Translate instructions in current block, then see if block terminating inst
			//  tells us what control structure we are entering, e.g. loop or switch-case.
			CurrBlock->EmitSPARKAdaForFallThroughInsts(SPARKBodyFile); // all except last inst if it is control flow terminator for block
			vector<SMPInstr *>::reverse_iterator LastInstIter = CurrBlock->GetRevInstBegin();
			SMPInstr *LastInst = (*LastInstIter);
			STARS_ea_t LastAddr = LastInst->GetAddr();
			SMPitype FlowType = LastInst->GetDataFlowType();
			if (LastInst->IsBasicBlockTerminator()) {
				ControlFlowType LastCFType = this->GetControlFlowType(LastAddr);
				if (FlowType == INDIR_JUMP) {
					// Translate switch-case statement.
					int NextFollowBlockNum = this->FindFollowBlockNum(CurrBlock, true);
					this->EmitSPARKAdaForSwitch(CurrBlockNum, NextFollowBlockNum, SPARKBodyFile);
					ResumeBlockNum = NextFollowBlockNum;
				}
				else if (FlowType == COND_BRANCH) {
					if (LastCFType == JUMP_TO_SWITCH_INDIR_JUMP) {
						// conditional branch to INDIR_JUMP is not well-structured; should not occur
						SMP_msg("FATAL ERROR: COND_BRANCH of type JUMP_TO_SWITCH_INDIR_JUMP at %llx\n",
							(unsigned long long) LastAddr);
						assert(LastCFType != JUMP_TO_SWITCH_INDIR_JUMP);
					}
					else if (IsLoopExitFlow(LastCFType)) {
						// Middle exit, else it would be loop header or tail handled above.
						LastInst->EmitSPARKAda(SPARKBodyFile); // emit exit when (condition)
						// We want to resume at the fall-through block, if it is in the loop,
						//  which it should be because this is not the tail block.
						list<SMPBasicBlock *>::const_iterator SuccIter;
						if (INVERTED_LOOP_EXIT != LastCFType)
							SuccIter = CurrBlock->GetFallThroughSucc();
						else // must take the branch to stay in the loop
							SuccIter = CurrBlock->GetCondNonFallThroughSucc();
						assert(SuccIter != CurrBlock->GetLastConstSucc()); // COND_BRANCH must have fall through and non-fall-through
						ResumeBlockNum = (*SuccIter)->GetNumber();
					}
					else if ((LastCFType == BRANCH_IF_THEN) || (LastCFType == BRANCH_IF_THEN_ELSE) || (LastCFType == SHORT_CIRCUIT_BRANCH)) {
						// Translate if-else structure previously identified
						int NextFollowBlockNum = this->FindFollowBlockNum(CurrBlock, true);
						this->EmitSPARKAdaForConditional(CurrBlockNum, NextFollowBlockNum, SPARKBodyFile);
						ResumeBlockNum = NextFollowBlockNum;
					}
					else if (LastCFType != JUMP_TO_DEFAULT_CASE) {
						LastInst->EmitSPARKAda(SPARKBodyFile); // handles loop-back case
						// If this is the optimized top-testing block in which the header was moved to the bottom
						//  position of the loop, then we will have a LOOP_BACK control flow type rather than a
						//  LOOP_EXIT type for the COND_BRANCH at the end of the header block. LastInst->EmitSPARKAda()
						//  will emit an inverted-condition loop exit, but now we need to fall through into the successor
						//  block that is within our loop.
						if (CurrBlock->IsOptimizedTopLoopTest()) {
							assert(LOOP_BACK == this->GetControlFlowType(LastAddr));
							int LoopNum = this->GetLoopNumFromHeaderBlockNum(CurrBlock->GetNumber());
							assert(0 <= LoopNum);
							// Resume at whichever successor is still inside our loop.
							for (list<SMPBasicBlock *>::iterator SuccIter = CurrBlock->GetFirstSucc(); SuccIter != CurrBlock->GetLastSucc(); ++SuccIter) {
								SMPBasicBlock *SuccBlock = (*SuccIter);
								int SuccBlockNum = SuccBlock->GetNumber();
								if (this->IsBlockInLoop(SuccBlockNum, LoopNum)) {
									ResumeBlockNum = SuccBlockNum;
									break;
								}
							}
						}
					}
					else { // JUMP_TO_DEFAULT_CASE
						// See comments in EmitSPARKAdaForBlock() for the JUMP_TO_DEFAULT_CASE issues.
						//  Here, we are starting to encounter a switch statement inside our loop.
						list<SMPBasicBlock *>::const_iterator FallThroughIter = CurrBlock->GetFallThroughSucc();
						assert(FallThroughIter != CurrBlock->GetLastConstSucc()); // COND_BRANCH always has a fall-through
						ResumeBlockNum = (*FallThroughIter)->GetNumber();
						LastInst->SetSPARKTranslated();
					}
				}
				else if (FlowType == JUMP) {
					// Get control flow type of the jump
					assert(1 == CurrBlock->GetNumSuccessors());
					if (JUMP_INTO_LOOP_TEST == LastCFType) {
						// Optimized top-testing loop becomes a bottom-testing loop with a jump to the loop header/loop-test
						//  block for the first iteration. We translate as a top-testing loop in Ada by starting with the 
						//  target block of the jump. At this level, we emit a call and resume after the inner loop.
						SMPBasicBlock *NextBlock = (*(CurrBlock->GetFirstSucc()));
						assert(NextBlock->IsLoopHeaderBlock());
						int NextBlockNum = NextBlock->GetNumber();
						int LoopNum = this->GetLoopNumFromHeaderBlockNum(NextBlockNum);
						assert(0 <= LoopNum);
						int NextFollowBlockNum = this->LoopFollowNodes[LoopNum];
						assert(SMP_BLOCKNUM_UNINIT != NextFollowBlockNum);
						this->EmitSPARKAdaLoopCall(NextBlock->GetFirstAddr(), (size_t) LoopNum, SPARKBodyFile);
						ResumeBlockNum = NextFollowBlockNum;
						LastInst->SetSPARKTranslated();
						pair<int, int> BlockItem(NextBlockNum, ResumeBlockNum);
						pair<int, pair<int, int> > WorkListItem(LoopNum, BlockItem);
						this->SPARKLoopWorkList.push_back(WorkListItem);
					}
					else if ((JUMP_TO_DEFAULT_CASE == LastCFType) || (JUMP_TO_SWITCH_INDIR_JUMP == LastCFType)) {
						; // jump to default case is not translated directly; ditto for jump around default case to INDIR_JUMP
						LastInst->SetSPARKTranslated();
					}
					else {
						// All other cases should be handled in SMPInstr::EmitSPARKAda().
						LastInst->EmitSPARKAda(SPARKBodyFile);
						// Handle special case in which a JUMP just goes to the lone successor,
						//  which is equivalent to falling through.
						if (FALL_THROUGH == LastCFType) {
							ResumeBlockNum = (*(CurrBlock->GetFirstConstSucc()))->GetNumber();
						}
					}
				}
				else if (FlowType == RETURN) {
					LastInst->EmitSPARKAda(SPARKBodyFile);
					// Handle possible tail call case
					;
				}
			} // end if LastInst is basic block terminator
			else {
				// Last inst was translated by SMPBasicBlock::EmitSPARKAdaForFallThroughInsts().
				//  CurrBlock can only have one successor, a fall-through block.
				// NOTE: If CurrBlock ends with a HALT or a call to a NORET function, we can have no successors.
				size_t NumSuccessors = CurrBlock->GetNumSuccessors();
				if (1 == NumSuccessors) {
					SMPBasicBlock *FallThroughSucc = (*(CurrBlock->GetFirstSucc()));
					assert(NULL != FallThroughSucc);
					ResumeBlockNum = FallThroughSucc->GetNumber();
				}
				else {
					assert(((FlowType == CALL) || (FlowType == HALT)) && (0 == NumSuccessors));
				}
			} // end if LastInst is basic block terminator ... else ...
		} // end if loop tail block ... else ...

		// We have translated the header block, and recursed into the next control flow structure if the end of the
		//  header block indicated a new structure was beginning. Time to pick up with ResumeBlock unless it is our
		//  follow block.
		CurrBlockNum = ResumeBlockNum;
	} // end while CurrBlockNum is in the loop

	this->SPARKControlStack.pop_back();
	return;
} // end of SMPFunction::EmitSPARKAdaForLoop()

// Recursive descent translation of switch statement starting with INDIR_JUMP block, stop before Follow Block.
//
// Parameters:
//  HeaderBlockNum: block number of either the INDIR_JUMP block or the switch default-case
//                  block (the default can be encountered first in the layout; in that case
//                  we redirect to the INDIR_JUMP block via SwitchInfoArray).
//  FollowBlockNum: expected switch follow node; asserted to match SwitchInfoArray's record.
//  SPARKBodyFile:  output file receiving the generated Ada "case ... end case;" text.
//
// Emits each jump-table case as an Ada "when v1 | v2 ... =>" alternative, skipping table
//  entries that target the default block, and emits the default case last as "others =>".
void SMPFunction::EmitSPARKAdaForSwitch(int HeaderBlockNum, int FollowBlockNum, FILE *SPARKBodyFile) {
	assert(0 <= HeaderBlockNum);
	SMPBasicBlock *HeaderBlock = this->GetBlockByNum((size_t) HeaderBlockNum);
	this->SPARKControlStack.push_back(SPARK_SWITCH);

	size_t SwitchIndex;
	SMPInstr *LastInst;
	if (HeaderBlock->IsSwitchDefaultCase()) {
		// Encountered default case first, but we want to emit Ada code for it last.
		STARS_ea_t DefaultCaseAddr = HeaderBlock->GetFirstAddr();
		SwitchIndex = this->FindSwitchIndexForDefaultCaseAddr(DefaultCaseAddr);
		// Switch the HeaderBlock to the INDIR_JUMP block.
		HeaderBlock = this->GetBlockByNum(this->SwitchInfoArray[SwitchIndex].IndirJumpBlockNum);
		assert(NULL != HeaderBlock);
		// Last instruction of the INDIR_JUMP block is the indirect jump itself.
		LastInst = (*(--(HeaderBlock->GetLastInst())));
	}
	else {
		vector<SMPInstr *>::iterator LastInstIter = --(HeaderBlock->GetLastInst());
		LastInst = (*LastInstIter);

		STARS_ea_t LastAddr = LastInst->GetAddr();
		SMPitype FlowType = LastInst->GetDataFlowType();
		assert(FlowType == INDIR_JUMP);
		// Map the indirect jump address to its switch-table record index.
		map<STARS_ea_t, size_t>::iterator MapIter = this->SwitchJumpMap.find(LastAddr);
		if (MapIter != this->SwitchJumpMap.end()) {
			SwitchIndex = MapIter->second;
		}
		else {
			SMP_msg("FATAL ERROR: Failed to find INDIR_JUMP addr %llx in SwitchJumpMap.\n", (unsigned long long) LastAddr);
			SMP_fprintf(SPARKBodyFile, "ERROR in switch statement translation at %llx.\n", (unsigned long long) LastAddr);
			assert(false);
		}
	}
	assert(SwitchIndex < this->SwitchInfoArray.size());

	struct SwitchTableInfo TableInfo = this->SwitchInfoArray.at(SwitchIndex);
	int NextFollowBlockNum = TableInfo.FollowNodeNum;
	assert(NextFollowBlockNum == FollowBlockNum);
	// Emit Ada case statement.
	LastInst->EmitSPARKAda(SPARKBodyFile); // INDIR_JUMP is handled here
	// Loop through jump table, emitting code for each case.
	assert(!TableInfo.CaseBlockNums.empty());
	for (size_t CaseIndex = 0; CaseIndex < TableInfo.CaseBlockNums.size(); ++CaseIndex) {
		// Skip the default case if jump table entries to it are encountered.
		int CaseBlockNum = TableInfo.CaseBlockNums[CaseIndex];
		assert(0 <= CaseBlockNum);
		if (CaseBlockNum == TableInfo.DefaultCaseBlockNum) {
			continue;  // we emit the default at the end of the Ada case statement as "others =>"
		}

		// Ada cases look like:
		//  when 3 =>
		// or
		//  when 3 | 4 | 5 =>
		PrintSPARKIndentTabs(SPARKBodyFile);
		SMP_fprintf(SPARKBodyFile, "when ");
		assert(!TableInfo.IndexValue[CaseIndex].empty());
		size_t LastValueIndex = TableInfo.IndexValue[CaseIndex].size() - 1;
		for (size_t ValueIndex = 0; ValueIndex <= LastValueIndex; ++ValueIndex) {
			int Value = TableInfo.IndexValue[CaseIndex][ValueIndex];
			SMP_fprintf(SPARKBodyFile, "%d ", Value);
			if (ValueIndex < LastValueIndex) {
				SMP_fprintf(SPARKBodyFile, "| "); // OR operator before next value
			}
			else {
				SMP_fprintf(SPARKBodyFile, "=>\n");
				++STARS_SPARK_IndentCount;
			}
		} // end for all values in current case
		// Now, translate the code inside the case.
		this->EmitSPARKAdaForBlock(CaseBlockNum, NextFollowBlockNum, SPARKBodyFile, false);
		--STARS_SPARK_IndentCount;
	} // end for all cases
	// Emit the default case.
	if (SMP_BLOCKNUM_UNINIT != TableInfo.DefaultCaseBlockNum) { // we have a default case
		PrintSPARKIndentTabs(SPARKBodyFile);
		SMP_fprintf(SPARKBodyFile, "others =>\n");
		++STARS_SPARK_IndentCount;
		// ReadytoEmitSwitchDefault == true so the default block is translated, not re-dispatched.
		this->EmitSPARKAdaForBlock(TableInfo.DefaultCaseBlockNum, NextFollowBlockNum, SPARKBodyFile, true);
		--STARS_SPARK_IndentCount;
	}
	// Emit the "end case;" statement
	--STARS_SPARK_IndentCount;
	PrintSPARKIndentTabs(SPARKBodyFile);
	SMP_fprintf(SPARKBodyFile, "end case;\n");

	this->SPARKControlStack.pop_back();
	// After translating the switch statement, we must be ready to resume at the switch follow node in our caller.
	return;
} // end of SMPFunction::EmitSPARKAdaForSwitch()

// Recursive descent translation of if-else statement starting with COND_BRANCH block, stop before Follow Block.
//
// Parameters:
//  HeaderBlockNum: block number of the block ending in the COND_BRANCH being translated.
//  FollowBlockNum: block number where the if/if-else structure rejoins; not emitted here.
//  SPARKBodyFile:  output file receiving the generated Ada "if ... end if;" text.
//
// Handles three COND_BRANCH flavors (BRANCH_IF_THEN, BRANCH_IF_THEN_ELSE,
//  SHORT_CIRCUIT_BRANCH), plus two special situations: a conditional that guards a loop
//  (may be deferred to SPARKLoopWorkList), and the inverted-loop-exit case in which the
//  "end if;" has already been emitted by a descendant call.
void SMPFunction::EmitSPARKAdaForConditional(int HeaderBlockNum, int FollowBlockNum, FILE *SPARKBodyFile) {
	assert(0 <= HeaderBlockNum);
	SMPBasicBlock *HeaderBlock = this->GetBlockByNum((size_t) HeaderBlockNum);
	SMPInstr *LastInst = (*(HeaderBlock->GetRevInstBegin()));
	STARS_ea_t LastAddr = LastInst->GetAddr();

	// We need to detect the case in which we are beginning a guarded loop, i.e.
	//  if cond then loop ... end loop; end if;
	// For guarded loops, we save the COND_BRANCH location on the loop work list
	//  and return. If the SPARKControlStack already has a LOOP entry, or if
	//  we are not translating loop instructions, we just emit a call to the loop procedure
	//  that will be emitted later and save the COND_BRANCH on the work list.
	//  Otherwise, we actually translate the instructions.
	map<STARS_ea_t, STARS_ea_t>::const_iterator GuardIter = this->GuardToLoopMap.find(LastAddr);
	bool SaveAndReturn = false;
	bool TranslatingGuardedLoop = (GuardIter != this->GuardToLoopMap.cend());
	if (TranslatingGuardedLoop) {
		// COND_BRANCH instruction is a guard for a loop.
		SaveAndReturn = (!this->TranslatingLoopToProc()) || this->IsSPARKLoopInTranslationStack();
		// Even if we are translating a loop to a procedure, nested loops
		//  do not get translated until they come to the head of the work list.

		if (SaveAndReturn) {
			STARS_ea_t LoopAddr = GuardIter->second;
			SMPBasicBlock *LoopHeadBlock = this->GetBlockFromInstAddr(LoopAddr);
			int CurrBlockNum = LoopHeadBlock->GetNumber();
			int LoopNum = this->GetLoopNumFromHeaderBlockNum(CurrBlockNum);
			assert(0 <= LoopNum);
			this->EmitSPARKAdaLoopCall(LoopAddr, (size_t) LoopNum, SPARKBodyFile);
			// Put the if-then header and follow block numbers onto the work list
			pair<int, int> BlockItem(HeaderBlockNum, FollowBlockNum);
			pair<int, pair<int, int> > WorkListItem(LoopNum, BlockItem);
			this->SPARKLoopWorkList.push_back(WorkListItem);
			// Return so that our caller can resume after the if-then-loop-endloop-endif.
			return;
		}
	}

	LastInst->EmitSPARKAda(SPARKBodyFile); // emit if (inverted condition) then

	// We have already emitted the "if (inverted condition) then" for the COND_BRANCH instruction.
	//  For the if-then case, we emit the code for the then-block and then "end if;" and return.
	//  For the if-then-else case, we emit the code for the then-block and then "else" etc.
	ControlFlowType LastCFType = this->GetControlFlowType(LastAddr);
	bool IfThenCase = (LastCFType == BRANCH_IF_THEN);
	bool ShortCircuitCase = (LastCFType == SHORT_CIRCUIT_BRANCH);
	assert(IfThenCase || ShortCircuitCase || (LastCFType == BRANCH_IF_THEN_ELSE));
	int FallThroughBlockNum = SMP_BLOCKNUM_UNINIT;
	int DistantBlockNum = SMP_BLOCKNUM_UNINIT;
	list<SMPBasicBlock *>::const_iterator SuccIter = HeaderBlock->GetCondNonFallThroughSucc();
	if (!ShortCircuitCase) {
		assert(SuccIter != HeaderBlock->GetLastConstSucc());
		DistantBlockNum = (*SuccIter)->GetNumber();
	}
	// Find the fall-through and non-fall-through block numbers.
	if (ShortCircuitCase) {
		// Short-circuit expressions get their successor numbers from the shadow CFG's
		//  condition-expression record rather than the block's successor list.
		STARSCFGBlock *HeaderCFGBlock = this->ShadowCFGBlocks[HeaderBlockNum];
		assert(NULL != HeaderCFGBlock);
		STARSCondExpr *HeaderExpr = HeaderCFGBlock->GetExpr();
		assert(NULL != HeaderExpr);
		FallThroughBlockNum = HeaderExpr->GetFallThroughBlockNum();
		DistantBlockNum = HeaderExpr->GetNonFallThroughBlockNum();
		IfThenCase = (DistantBlockNum == FollowBlockNum); // treat ShortCircuitCase as IfThenCase from now on
	}
	else {
		assert(2 == HeaderBlock->GetNumSuccessors());
		SuccIter = HeaderBlock->GetFallThroughSucc();
		FallThroughBlockNum = (*SuccIter)->GetNumber();
	}
	SMPBasicBlock *ThenBlock = this->RPOBlocks[(size_t) FallThroughBlockNum];
	if (IfThenCase) {
		// The common case is to jump around the ThenBlock, which can then fall-through to the FollowBlock:
		//  if (cond) then goto L1;
		//   then-block
		//  L1: 
		// However, it is also possible to jump to the ThenBlock, which then jumps or falls through to the FollowBlock:
		// if (cond) then goto L2;
		// L1: 
		//      somewhere else:  L2: then-block
		//                           goto L1
		// This could be odd code generation or hand-written code.
		// In this odd case, the FallThroughBlock has more than 1 predecessor. In the normal case, the
		//  FallThroughBlock has only one predecessor (the COND_BRANCH block falls through to it). We can use
		//  the number of predecessor blocks to distinguish the two cases.
		// NOTE: The ShortCircuitCase code earlier assumes that this odd case cannot be combined with the ShortCircuitCase.
		this->SPARKControlStack.push_back(SPARK_THEN_CLAUSE);
		bool OddIfThenCase = LastInst->IsOddIfThenCase();
		if (!OddIfThenCase) { // normal case
			assert(DistantBlockNum == FollowBlockNum); // non-fall-through block is normal follow block for if-then
			this->EmitSPARKAdaForBlock(FallThroughBlockNum, FollowBlockNum, SPARKBodyFile, false, (TranslatingGuardedLoop && !SaveAndReturn));
		}
		else {
			// Need to generate the code for the ThenBlock == DistantBlock first.
			this->EmitSPARKAdaForBlock(DistantBlockNum, FollowBlockNum, SPARKBodyFile, false, (TranslatingGuardedLoop && !SaveAndReturn));
		}
		// NOTE(review): unlike the else-branch below, SPARK_THEN_CLAUSE is not popped here
		//  before emitting "end if;" — confirm whether the pop is handled elsewhere.
	}
	else {
		this->SPARKControlStack.push_back(SPARK_THEN_CLAUSE);
		this->EmitSPARKAdaForBlock(FallThroughBlockNum, FollowBlockNum, SPARKBodyFile, false);
		this->SPARKControlStack.pop_back();
		// SMPBasicBlock *ElseBlock = this->RPOBlocks[(size_t) DistantBlockNum];
		--STARS_SPARK_IndentCount;
		PrintSPARKIndentTabs(SPARKBodyFile);
		SMP_fprintf(SPARKBodyFile, "else\n");
		++STARS_SPARK_IndentCount;
		this->SPARKControlStack.push_back(SPARK_ELSE_CLAUSE);
		this->EmitSPARKAdaForBlock(DistantBlockNum, FollowBlockNum, SPARKBodyFile, false);
		this->SPARKControlStack.pop_back();
	}

	// We need to detect the odd case of falling through to a loop header with INVERTED_LOOP_EXIT,
	//  i.e. we are doing a LOOP_BACK of sorts by falling out of the conditional,
	//  in which case the "end if;" already was emitted before "end loop;" inside EmitSPARKAdaForBlock().
	bool InvertedLoopExitCase = false;
	if (FollowBlockNum >= 0) {
		SMPBasicBlock *FollowBlock = this->GetBlockByNum((size_t) FollowBlockNum); // will be loop header that includes conditional in InvertedLoopExitCase
		ControlFlowType FollowBlockCFType = FollowBlock->GetLastInstCFType();
		InvertedLoopExitCase = (this->DoesBlockDominateBlock(FollowBlockNum, FallThroughBlockNum) && (INVERTED_LOOP_EXIT == FollowBlockCFType));
	}
	// Now we just need to emit the "end if;"
	if (!InvertedLoopExitCase) {
		--STARS_SPARK_IndentCount;
		PrintSPARKIndentTabs(SPARKBodyFile);
		SMP_fprintf(SPARKBodyFile, "end if;\n");
	}
	else {
		SMP_msg("INFO: SPARK: Not emitting end if for guarded conditional at %llx due to InvertedLoopExitCase\n",
			(uint64_t) LastAddr);
	}

	return;
} // end of SMPFunction::EmitSPARKAdaForConditional()

// emit call to loop proc that will be created later starting at LoopAddr
// LoopIndex selects the loop's entry in LoopMemRangeInArgRegsBitmap, which tells us
//  whether the loop proc takes arguments (memory ranges passed in arg regs).
void SMPFunction::EmitSPARKAdaLoopCall(STARS_ea_t LoopAddr, size_t LoopIndex, FILE *SPARKBodyFile) {
	// Build the proc name from the loop start address, e.g. "ZSTLoopProc_8048ca0".
	std::string ProcName("ZSTLoopProc_");
	std::ostringstream AddrString;
	AddrString << std::hex << LoopAddr;
	ProcName += AddrString.str();
	// Emit a call to the proc name, e.g. "ZSTLoopProc_8048ca0;"
	SMP_fprintf(SPARKBodyFile, "\n");
	PrintSPARKIndentTabs(SPARKBodyFile);
	if (this->LoopMemRangeInArgRegsBitmap[LoopIndex].any()) {
		// Loop proc has args; emit "ProcName(arg, ...);"
		SMP_fprintf(SPARKBodyFile, "%s(", ProcName.c_str());
		this->EmitSPARKArgs(SPARKBodyFile, nullptr, false, LoopIndex); // nullptr (was NULL) for file-wide consistency
		SMP_fprintf(SPARKBodyFile, ");\n");
	}
	else {
		// No args; emit a parameterless call "ProcName;"
		SMP_fprintf(SPARKBodyFile, "%s;\n", ProcName.c_str());
	}

	return;
} // end of SMPFunction::EmitSPARKAdaLoopCall()

// Based on control flow structure of CurrBlock, find Ada follow block num; -1 if no structure besides fall-through
int SMPFunction::FindFollowBlockNum(SMPBasicBlock *CurrBlock, bool StartAtLastInst) {
	int ResultNum = SMP_BLOCKNUM_UNINIT;

	if (!StartAtLastInst) {
		// Not examining a branch/jump; we are starting a new loop or a switch default case.
		if (CurrBlock->IsLoopHeaderBlock()) {
			// The follow node was recorded per-loop in LoopFollowNodes.
			int LoopIndex = this->GetLoopNumFromHeaderBlockNum(CurrBlock->GetNumber());
			assert(LoopIndex >= 0);
			ResultNum = this->LoopFollowNodes[LoopIndex];
			assert(ResultNum >= 0);
		}
		else if (CurrBlock->IsSwitchDefaultCase()) {
			// The follow node was recorded per-switch; locate the switch via its default case addr.
			STARS_ea_t DefaultAddr = CurrBlock->GetFirstAddr();
			size_t SwitchTableIndex = this->FindSwitchIndexForDefaultCaseAddr(DefaultAddr);
			ResultNum = this->SwitchInfoArray.at(SwitchTableIndex).FollowNodeNum;
		}
		return ResultNum;
	}

	// Key is to examine the block-terminating branch or jump.
	vector<SMPInstr *>::iterator TermIter = --(CurrBlock->GetLastInst());
	SMPInstr *TermInst = (*TermIter);
	STARS_ea_t TermAddr = TermInst->GetAddr();
	SMPitype TermFlowType = TermInst->GetDataFlowType();
	if (INDIR_JUMP == TermFlowType) { // switch statement
		map<STARS_ea_t, size_t>::iterator SwitchIter = this->SwitchJumpMap.find(TermAddr);
		if (SwitchIter == this->SwitchJumpMap.end()) {
			// leave error code of SMP_BLOCKNUM_UNINIT in ResultNum
			SMP_msg("ERROR: Failed to find INDIR_JUMP addr %llx in SwitchJumpMap.\n", (unsigned long long) TermAddr);
		}
		else {
			size_t SwitchTableIndex = SwitchIter->second;
			assert(SwitchTableIndex < this->SwitchInfoArray.size());
			ResultNum = this->SwitchInfoArray.at(SwitchTableIndex).FollowNodeNum;
		}
	}
	else if (COND_BRANCH == TermFlowType) {
		ControlFlowType BranchCFType = GetControlFlowType(TermAddr);
		if (IsLoopExitFlow(BranchCFType)) {
			// Middle exit from loop. Follow block is the fall-through successor,
			//  except for INVERTED_LOOP_EXIT, where the branch must be taken to
			//  stay in the loop, so the non-fall-through successor is the follow block.
			list<SMPBasicBlock *>::const_iterator ExitSuccIter = (INVERTED_LOOP_EXIT == BranchCFType)
				? CurrBlock->GetCondNonFallThroughSucc()
				: CurrBlock->GetFallThroughSucc();
			assert(ExitSuccIter != CurrBlock->GetLastConstSucc()); // COND_BRANCH must have fall through and non-fall-through
			ResultNum = (*ExitSuccIter)->GetNumber();
		}
		else {
			// Handle if-else jumps here.
			assert((BRANCH_IF_THEN == BranchCFType) || (BRANCH_IF_THEN_ELSE == BranchCFType) || (SHORT_CIRCUIT_BRANCH == BranchCFType));
			map<STARS_ea_t, int>::iterator FollowIter = this->JumpFollowNodesMap.find(TermAddr);
			if (FollowIter == this->JumpFollowNodesMap.end()) {
				// leave error code of SMP_BLOCKNUM_UNINIT in ResultNum
				SMP_msg("ERROR: Failed to find COND_BRANCH entry for addr %llx in JumpFollowNodesMap.\n",
					(unsigned long long) TermAddr);
			}
			else { // found it
				ResultNum = FollowIter->second;
			}
		}
	}
	return ResultNum;
} // end of SMPFunction::FindFollowBlockNum()

// common code for different cases in EmitFuncPtrShadowingAnnotations2()
//
// Starting from the operand recorded in TempShadowList (a func ptr USE when FuncPtr,
//  else a critical outgoing arg), repeatedly call FindShadowingPoint2() over a worklist
//  of critical operands, emitting FPTRSHADOW/ARGSHADOW annotations at each shadow point
//  and a matching FPTRCHECK/ARGCHECK annotation at the checking point, all tied together
//  by a unique SHADOWID. CallAddr is the INDIR_CALL site, or STARS_BADADDR when there is
//  no single call site (e.g. a code ptr passed as an outgoing argument).
void SMPFunction::EmitShadowingHelper(FILE *InfoAnnotFile, SMPInstr *CurrInst, bool FuncPtr, STARS_ea_t CallAddr) {
	STARS_ea_t ShadowCheckAddr = CurrInst->GetAddr();
	STARS_ea_t OldShadowCheckAddr = STARS_BADADDR;
	bool GoodCallAddr = (STARS_BADADDR != CallAddr);
	bool InterruptCall = CurrInst->IsInterruptCall();
	bool ImproveCFG = global_STARS_program->ShouldSTARSMaximizeCFGImprovement();
	bool ShadowFuncPtrs = (FuncPtr && global_STARS_program->ShouldSTARSShadowFuncPtrs());
	bool ShadowArgs = !FuncPtr;
	ShadowPoint CriticalOp(ShadowCheckAddr, 0); // Checking point before indirect call
	list<ShadowPoint> WorkList;
	WorkList.push_back(CriticalOp);
	this->AlreadyShadowed.clear(); // fresh memo of emitted shadow points for this invocation

	ShadowSet ShadowUses;      // set of addr+USE pairs to shadow values that will be checked at ShadowCheckAddr
	ShadowSet NewCriticalOps;  // worklist of new shadow USEs and their addresses
	while (!WorkList.empty()) {
		// Pop the next critical operand and trace it back to its shadowing points.
		CriticalOp = WorkList.front();
		WorkList.pop_front();
		ShadowCheckAddr = CriticalOp.first;
		if (ShadowCheckAddr != OldShadowCheckAddr) { // starting a new chain
			OldShadowCheckAddr = ShadowCheckAddr;
		}

		bool UnsafeCodePointerChain = false;
		if (ShadowFuncPtrs)
			++STARS_FuncPtrShadowPointsAttempted; // statistics counter
		bool NonConstSourceFound = false;
		set<STARS_uval_t> ConstValues; // constant call-target values discovered while tracing
		bool ValidShadowing = this->FindShadowingPoint2(CriticalOp, false, ShadowUses, UnsafeCodePointerChain, NewCriticalOps, NonConstSourceFound, ConstValues);
		// Special case: every source traced back to a constant. If we are improving the CFG
		//  and have a real (non-interrupt) call site, emit IBT xref annotations for each
		//  constant target, plus a COMPLETE record for the call site.
		if (!NonConstSourceFound && ValidShadowing && ImproveCFG && GoodCallAddr && (!InterruptCall)) {
			SMP_msg("INFO: Only constants found in tracing func ptr at %llx\n", (uint64_t) ShadowCheckAddr);
			FILE *XrefsFile = global_STARS_program->GetXrefsFile();
			for (set<STARS_uval_t>::const_iterator ValIter = ConstValues.cbegin(); ValIter != ConstValues.cend(); ++ValIter) {
				SMP_fprintf(XrefsFile, "%18llx %6d INSTR XREF IBT FROMIB %18llx INDIRCALL\n",
					(uint64_t) (*ValIter), CurrInst->GetSize(), (uint64_t) CallAddr);
			}
			if (!ConstValues.empty()) {
				SMP_fprintf(XrefsFile, "%18llx %6d INSTR XREF FROMIB COMPLETE %6zu INDIRCALL\n",
					(uint64_t) CallAddr, CurrInst->GetSize(), ConstValues.size());
			}
		}
		if (ValidShadowing && UnsafeCodePointerChain) {
			if (ShadowFuncPtrs)
				++STARS_FuncPtrShadowPointsSucceeded; // statistics counter
			if (ShadowArgs || ShadowFuncPtrs) { // not just ImproveCFG
				// Need to shadow ShadowDefs right before their addrs, validate CriticalOp USE at ShadowCheckAddr
				unsigned int CurrentShadowID = global_STARS_program->GetShadowID();
				for (ShadowSet::const_iterator ShadowIter = ShadowUses.cbegin(); ShadowIter != ShadowUses.cend(); ++ShadowIter) {
					STARS_ea_t ShadowAddr = ShadowIter->first;
					assert(STARS_BADADDR != ShadowAddr);
					// ShadowIter->second indexes into TempShadowList to recover the operand.
					STARSOpndTypePtr ShadowOp = this->TempShadowList.GetRefNum(ShadowIter->second)->GetOp();
					assert((nullptr != ShadowOp) && (!ShadowOp->IsVoidOp()));
					SMPInstr *ShadowInst = this->GetInstFromAddr(ShadowAddr);
					STARSOpndTypePtr UnnormalizedUseOp = CloneIfNecessary(ShadowOp, this->UsesFramePointer());
					if (ShadowOp->IsMemOp()) {
						// Un-normalize stack ops for annotation printing.
						ShadowInst->MDGetUnnormalizedOp(UnnormalizedUseOp);
					}

					if (ShadowFuncPtrs) {
						// Emit the annotation to shadow the InArg value at the top of the function.
						SMP_fprintf(InfoAnnotFile, "%18llx %6zu INSTR FPTRSHADOW ",
							(unsigned long long) ShadowAddr, ShadowInst->GetSize());
					}
					else if (ShadowArgs) {
						// Emit the annotation to shadow the InArg value at the top of the function.
						SMP_fprintf(InfoAnnotFile, "%18llx %6zu INSTR ARGSHADOW ",
							(unsigned long long) ShadowAddr, ShadowInst->GetSize());
					}
					AnnotPrintOperand(UnnormalizedUseOp, InfoAnnotFile, UseFP, ShadowInst->MDIsAddressing64bit());
					SMP_fprintf(InfoAnnotFile, " SHADOWID %u\n", CurrentShadowID);

					// Remember this point so NewCriticalOps processing does not re-queue it.
					AlreadyShadowed.insert(*ShadowIter);
				} // end for all ShadowUses

				// Emit the annotation to check the shadowed value before the INDIR_CALL.
				if (!ShadowUses.empty()) {
					SMPInstr *ShadowCheckInst = this->GetInstFromAddr(ShadowCheckAddr);
					if (FuncPtr) {
						SMP_fprintf(InfoAnnotFile, "%18llx %6zu INSTR FPTRCHECK ",
							(unsigned long long) ShadowCheckAddr, ShadowCheckInst->GetSize());
					}
					else {
						SMP_fprintf(InfoAnnotFile, "%18llx %6zu INSTR ARGCHECK ",
							(unsigned long long) ShadowCheckAddr, ShadowCheckInst->GetSize());
					}
					STARSOpndTypePtr UseOp = ShadowCheckInst->GetFirstRightOperandNoNorm(); // for annotation printing
					AnnotPrintOperand(UseOp, InfoAnnotFile, UseFP, ShadowCheckInst->MDIsAddressing64bit());
					SMP_fprintf(InfoAnnotFile, " SHADOWID %u\n", CurrentShadowID);

					// Increment the unique shadowing index
					global_STARS_program->IncrementShadowID();
				}
			}
		} // end if (ValidShadowing && UnsafeCodePointerChain)
		else if (ValidShadowing) { // Must be safe from overwriting
			if (FuncPtr)
				++STARS_FuncPtrShadowPointsSafe; // statistics counter
		}
		else {
			if (FuncPtr)
				++STARS_FuncPtrShadowPointsFailed; // statistics counter
		}

		// Now we need to find shadowing points for the NewCriticalOps.
		if (!NewCriticalOps.empty()) {
			for (ShadowSet::const_iterator NewIter = NewCriticalOps.cbegin(); NewIter != NewCriticalOps.cend(); ++NewIter) {
				ShadowPoint CurrPoint = *NewIter;
				if (!this->IsAlreadyShadowed(CurrPoint)) {
					// Only add to WorkList if not already shadowed.
					WorkList.push_back(CurrPoint);
				}
			}
			NewCriticalOps.clear();
		}
		ShadowUses.clear(); // reset per-chain accumulator before next worklist item
	} // end while (!WorkList.empty())
	this->AlreadyShadowed.clear();

	return;
} // end of SMPFunction::EmitShadowingHelper()

// Emit annotations for func ptr shadowing defense and/or for improving CFG related to INDIR_CALLs.
//
// Scans every block containing a call, walking its instructions in REVERSE order so that
//  each CALL/INDIR_CALL is seen before the (earlier) instructions that pass its arguments.
//  Three cases are detected (cases 1 and 2 at an INDIR_CALL, case 3 at an arg pass
//  preceding a direct CALL); each hands a critical operand to EmitShadowingHelper().
void SMPFunction::EmitFuncPtrShadowingAnnotations2(FILE *InfoAnnotFile) {
	// Keep track of which annotations are requested.
	bool ImproveCFG = global_STARS_program->ShouldSTARSMaximizeCFGImprovement();
	bool ShadowFuncPtrs = global_STARS_program->ShouldSTARSShadowFuncPtrs();
	if (this->HasGoodRTLs() && (ImproveCFG || this->HasUnsafeIndirectWrites) && (!this->IsLeaf())) {
		// Candidate for shadowing function pointers, if any of three cases passes through unsafe blocks.
		for (size_t BlockIndex = 0; BlockIndex < this->RPOBlocks.size(); ++BlockIndex) {
			SMPBasicBlock *CurrBlock = this->RPOBlocks[BlockIndex];
			if (CurrBlock->HasCallInstruction()) {
				size_t ArgNum = 0; // presumably an out-param set by MDIsArgumentPass() -- TODO confirm
				bool Case3Search = false; // have not found CALL yet
				for (vector<SMPInstr *>::reverse_iterator LastInstIter = CurrBlock->GetRevInstBegin(); LastInstIter != CurrBlock->GetRevInstEnd(); ++LastInstIter) {
					SMPInstr *CurrInst = (*LastInstIter);
					// Case 1: InArg CODEPTR reaches INDIR_CALL via simple copies
					// Case 2: Internally initialized CODEPTR reaches INDIR_CALL via simple copies
					if (INDIR_CALL == CurrInst->GetDataFlowType()) {
						Case3Search = false; // Override previous CALL now that we are processing INDIR_CALL
						// Record the call-target USE operand in TempShadowList, then trace it.
						STARSOpndTypePtr UseOp = CurrInst->GetFirstRightOperand();
						STARSDefUseIter UseIter = CurrInst->FindUse(UseOp);
						assert(UseIter != CurrInst->GetLastUse());
						this->TempShadowList.clear();
						this->TempShadowList.SetRef(UseIter->GetOp(), UseIter->GetType(), UseIter->GetSSANum());

						this->EmitShadowingHelper(InfoAnnotFile, CurrInst, true, CurrInst->GetAddr());
					} // end if INDIR_CALL
					else if (CALL == CurrInst->GetDataFlowType()) {
						// Search for case 3: CODEPTR passed as outgoing arg.
						Case3Search = ShadowFuncPtrs;
					}
					else if (Case3Search && CurrInst->MDIsArgumentPass(ArgNum)) {
						// Case 3 candidate: inst passes an outgoing arg to the CALL seen later in the block.
						STARSDefUseIter ArgIter = CurrInst->GetFirstNonFlagsDef();
						if ((ArgIter != CurrInst->GetLastDef()) && IsEqType(CODEPTR, ArgIter->GetType())) {
							// Arg DEF is a code pointer; recover the USE operand that sources it.
							STARSOpndTypePtr ArgOp = nullptr;
							unsigned short SignMask = 0;
							if (CurrInst->MDIsMoveInstr()) {
								ArgOp = CurrInst->GetMoveSource(); // Need a USE for FindShadowingPoint2()
							}
							else if (CurrInst->MDIsSignedLoad(SignMask)) {
								ArgOp = CurrInst->GetFirstRightOperandNoNorm();
								if (ArgOp->IsRegOp()) {
									CanonicalizeOpnd(ArgOp);
								}
								else if (MDIsStackAccessOpnd(ArgOp, this->UsesFramePointer()) && CurrInst->AreDefsNormalized()) {
									// Normalize the stack USE op to match the inst's normalized DEFs.
									ArgOp = this->GetNormalizedOperand(CurrInst->GetAddr(), ArgOp);
								}
							}
							if ((nullptr != ArgOp) && !ArgOp->IsVoidOp()) {
								STARSDefUseIter UseIter = CurrInst->FindUse(ArgOp);
								assert(UseIter != CurrInst->GetLastUse());
								this->TempShadowList.clear();
								this->TempShadowList.SetRef(ArgOp, UseIter->GetType(), UseIter->GetSSANum());
								// STARS_BADADDR: no single INDIR_CALL site for this code ptr arg.
								this->EmitShadowingHelper(InfoAnnotFile, CurrInst, true, STARS_BADADDR);
							}
						} // end if CODEPTR ArgIter
					} // end if INDIR_CALL elsif CALL elsif Case3 arg pass
				} // end for all insts in reverse order
			} // end if current block has a call inst
		} // end for all blocks

	} // end if good RTLs, unsafe indirect writes, and not a leaf func
	return;
} // end of SMPFunction::EmitFuncPtrShadowingAnnotations2()

// Emit annotations for critical argument shadowing defense
//
// Scans every block containing a call, walking its instructions in REVERSE order so that
//  each CALL is seen before the (earlier) instructions that pass its arguments. A call is
//  "critical" when its callee name has taint-warning arg positions (case 1) or its callee
//  function has critical InArg positions (case 2); matching arg-pass instructions are then
//  handed to EmitShadowingHelper() with FuncPtr == false (ARGSHADOW/ARGCHECK annotations).
void SMPFunction::EmitArgShadowingAnnotations(FILE *InfoAnnotFile) {
	if (this->HasGoodRTLs() && this->HasUnsafeIndirectWrites && (!this->IsLeaf())) {
		// Candidate for shadowing critical args, if either of two cases passes through unsafe blocks.
		for (size_t BlockIndex = 0; BlockIndex < this->RPOBlocks.size(); ++BlockIndex) {
			SMPBasicBlock *CurrBlock = this->RPOBlocks[BlockIndex];
			if (CurrBlock->HasCallInstruction()) {
				size_t ArgNum = 0; // arg position, set by MDIsArgumentPass() and tested against CriticalArgPosBits below
				bool ArgSearch = false; // have not found CALL yet
				uint32_t CriticalArgPosBits = 0; // bitmask: bit k set => arg position k is critical
				for (vector<SMPInstr *>::reverse_iterator LastInstIter = CurrBlock->GetRevInstBegin(); LastInstIter != CurrBlock->GetRevInstEnd(); ++LastInstIter) {
					SMPInstr *CurrInst = (*LastInstIter);
					// Case 1: CALL to critical library function.
					if (CALL == CurrInst->GetDataFlowType()) {
						// Case 1: Calling critical library function.
						string TaintFuncName = CurrInst->GetTrimmedCalledFunctionName();
						GetTaintWarningArgPositionsForCallName(TaintFuncName, CriticalArgPosBits);
						ArgSearch = (0 < CriticalArgPosBits); // found a critical call if any critical arg
						// Case 2: Callee has critical InArg (eventually passed to critical library function).
						if (!ArgSearch) {
							STARS_ea_t CalleeAddr = CurrInst->GetCallTarget();
							if (STARS_BADADDR != CalleeAddr) {
								SMPFunction *CalleeFunc = this->GetProg()->FindFunction(CalleeAddr);
								if (nullptr != CalleeFunc) {
									CriticalArgPosBits = CalleeFunc->GetTaintInArgPositions();
									ArgSearch = (0 < CriticalArgPosBits); // found a critical call if any critical arg
								}
							}
						}
					}
					else if (ArgSearch && CurrInst->MDIsArgumentPass(ArgNum)) {
						// Arg-pass inst preceding a critical CALL found later in the block.
						STARSDefUseIter ArgDefIter = CurrInst->GetFirstNonFlagsDef();
						if (ArgDefIter != CurrInst->GetLastDef()) {
							STARSOpndTypePtr ArgDefOp = ArgDefIter->GetOp();
							if (ArgDefOp->IsRegOp()) { // only register-passed args are shadowed here
								if (0 != (CriticalArgPosBits & (1 << ArgNum))) {
									// ArgNum matches a bit set in CriticalArgPosBits.
									// Recover the USE operand that sources the arg value.
									STARSOpndTypePtr ArgOp = nullptr;
									unsigned short SignMask = 0;
									if (CurrInst->MDIsMoveInstr()) {
										ArgOp = CurrInst->GetMoveSource(); // Need a USE for FindShadowingPoint2()
									}
									else if (CurrInst->MDIsSignedLoad(SignMask)) {
										ArgOp = CurrInst->GetFirstRightOperandNoNorm();
										if (ArgOp->IsRegOp()) {
											CanonicalizeOpnd(ArgOp);
										}
										else if (MDIsStackAccessOpnd(ArgOp, this->UsesFramePointer()) && CurrInst->AreDefsNormalized()) {
											// Normalize the stack USE op to match the inst's normalized DEFs.
											ArgOp = this->GetNormalizedOperand(CurrInst->GetAddr(), ArgOp);
										}
									}
									if ((nullptr != ArgOp) && !ArgOp->IsVoidOp()) {
										STARSDefUseIter UseIter = CurrInst->FindUse(ArgOp);
										assert(UseIter != CurrInst->GetLastUse());
										this->TempShadowList.clear();
										this->TempShadowList.SetRef(ArgOp, UseIter->GetType(), UseIter->GetSSANum());
										// FuncPtr == false: emit ARGSHADOW/ARGCHECK annotations.
										this->EmitShadowingHelper(InfoAnnotFile, CurrInst, false, STARS_BADADDR);
									}
								}
							}
						} // end if ArgDefIter valid
					} // end if CALL elsif arg pass
				} // end for all insts in reverse order
			} // end if current block has a call inst
		} // end for all blocks

	} // end if good RTLs, unsafe indirect writes, and not a leaf func
	return;
} // end of SMPFunction::EmitArgShadowingAnnotations()

// To assist in stack transformation security defenses, if the 
//  memory range expression is stack-relative, we emit an annotation
//  that identifies the memory range accessed, at InstAddr.
// The annotation should look like:
//  [hex InstAddr] [InstSize] INSTR STACKMEMRANGE MIN [ESP-k] LIMIT [ESP-j] INSTRSPDELTA -d ZZ
// The INSTRSPDELTA is the value of RSP relative to its value on function entry, which needs
//  to be added to the other constants to obtain the entry-relative stack offsets.
//  The other constants are relative to the current SPOFFSET for the mem write instruction.
// We write the InstAddrs of static memory lea opcodes to StaticMemLeaAddrs for later processing.
void SMPFunction::EmitStackMemRangeAnnotations(STARSExpression *MemLowerBound, STARSExpression *MemUpperBound, bool PositiveIncrement, const set<STARS_ea_t> &StackPtrCopiesSet, set<STARS_ea_t> &StaticMemLeaAddrs) {
	FILE *InfoAnnotFile = global_STARS_program->GetInfoAnnotFile();
	SMPInstr *MemInst = MemLowerBound->GetOriginalParentInst();
	STARS_ea_t MemAddr = MemInst->GetAddr();
	char DummyBuf[STARS_MAXSTR];
	// OutputString1 = "[hex InstAddr] [InstSize]" prefix for the memory access inst.
	// NOTE: casts must match the %llx/%lld conversions exactly; int64_t/uint64_t are
	//  not guaranteed to be [unsigned] long long on all platforms.
	(void) SMP_snprintf(DummyBuf, STARS_MAXSTR - 1, "%18llx", (unsigned long long) MemAddr);
	string OutputString1(DummyBuf);
	OutputString1 += " ";
	(void) SMP_snprintf(DummyBuf, STARS_MAXSTR - 1, "%6lld", (long long) MemInst->GetSize());
	OutputString1 += DummyBuf;
	// OutputString2 = the MIN/LIMIT/INSTRSPDELTA portion; re-used below for stack ptr copies.
	string OutputString2(" INSTR STACKMEMRANGE MIN ");
	// MIN is the lower bound when the access strides upward, else the upper bound.
	if (PositiveIncrement) {
		MemLowerBound->StringPrintStackPtrPlusOffset(OutputString2);
	}
	else {
		MemUpperBound->StringPrintStackPtrPlusOffset(OutputString2);
	}
	OutputString2 += " LIMIT ";
	// LIMIT is the opposite bound from MIN.
	if (PositiveIncrement) {
		MemUpperBound->StringPrintStackPtrPlusOffset(OutputString2);
	}
	else {
		MemLowerBound->StringPrintStackPtrPlusOffset(OutputString2);
	}
	OutputString2 += " INSTRSPDELTA ";
	(void) SMP_snprintf(DummyBuf, STARS_MAXSTR - 1, "%lld", (long long) MemInst->GetStackPtrOffset());
	OutputString2 += DummyBuf;
	OutputString2 += " ZZ";

	SMP_fprintf(InfoAnnotFile, "%s%s\n", OutputString1.c_str(), OutputString2.c_str());

	// Now, emit similar annotations for any StackPtrCopy instructions that were found
	//  when the Mem exprs were ExpandExpr()-ed.
	for (STARS_ea_t StackPtrCopyInstAddr : StackPtrCopiesSet) {
		assert(STARS_BADADDR != StackPtrCopyInstAddr);
		SMPInstr *StackPtrCopyInst = this->GetInstFromAddr(StackPtrCopyInstAddr);
		assert(nullptr != StackPtrCopyInst);

		// Ensure that we don't have the address of a global static memory item.
		if (StackPtrCopyInst->IsGlobalStaticLoadEffectiveAddress()) {
			// Defer static mem lea addrs to the caller for later processing.
			(void) StaticMemLeaAddrs.insert(StackPtrCopyInstAddr);
		}
		else if (StackPtrCopyInst->MDIsStackPointerCopy(this->UsesFramePointer())) {
			(void) SMP_snprintf(DummyBuf, STARS_MAXSTR - 1, "%18llx", (unsigned long long) StackPtrCopyInstAddr);
			string OutputString3(DummyBuf);
			OutputString3 += " ";
			(void) SMP_snprintf(DummyBuf, STARS_MAXSTR - 1, "%6lld", (long long) StackPtrCopyInst->GetSize());
			OutputString3 += DummyBuf;
			// Re-use OutputString2 with new InstAddr and InstSize.
			SMP_fprintf(InfoAnnotFile, "%s%s\n", OutputString3.c_str(), OutputString2.c_str());
		}
		else {
			SMP_msg("ERROR: Inst at %llx should be lea of either stack or static mem address.\n", (unsigned long long) StackPtrCopyInstAddr);
		}
	} // end for all addrs in StackPtrCopiesSet
	return;
} // end of SMPFunction::EmitStackMemRangeAnnotations()