svn commit: r294602 - in vendor/llvm/dist: include/llvm/CodeGen include/llvm/IR include/llvm/Transforms/Utils lib/CodeGen lib/CodeGen/AsmPrinter lib/CodeGen/SelectionDAG lib/IR lib/Target/AArch64 l...

Dimitry Andric dim at FreeBSD.org
Fri Jan 22 21:16:13 UTC 2016


Author: dim
Date: Fri Jan 22 21:16:09 2016
New Revision: 294602
URL: https://svnweb.freebsd.org/changeset/base/294602

Log:
  Vendor import of llvm release_38 branch r258549:
  https://llvm.org/svn/llvm-project/llvm/branches/release_38@258549

Added:
  vendor/llvm/dist/test/CodeGen/ARM/cse-flags.ll
  vendor/llvm/dist/test/DebugInfo/ARM/PR26163.ll
  vendor/llvm/dist/test/Transforms/CodeGenPrepare/ARM/
  vendor/llvm/dist/test/Transforms/CodeGenPrepare/ARM/bitreverse-recognize.ll
  vendor/llvm/dist/test/Transforms/CodeGenPrepare/ARM/lit.local.cfg
  vendor/llvm/dist/test/Transforms/CodeGenPrepare/bitreverse-hang.ll
  vendor/llvm/dist/test/Transforms/Inline/inline-funclets.ll
  vendor/llvm/dist/test/Transforms/InstCombine/bitreverse-hang.ll
Deleted:
  vendor/llvm/dist/test/CodeGen/X86/2014-05-30-CombineAddNSW.ll
  vendor/llvm/dist/test/Transforms/InstCombine/bitreverse-recognize.ll
Modified:
  vendor/llvm/dist/include/llvm/CodeGen/MachineFunction.h
  vendor/llvm/dist/include/llvm/CodeGen/SelectionDAGNodes.h
  vendor/llvm/dist/include/llvm/IR/GlobalValue.h
  vendor/llvm/dist/include/llvm/Transforms/Utils/Local.h
  vendor/llvm/dist/include/llvm/Transforms/Utils/SimplifyLibCalls.h
  vendor/llvm/dist/lib/CodeGen/AsmPrinter/DebugLocEntry.h
  vendor/llvm/dist/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
  vendor/llvm/dist/lib/CodeGen/CodeGenPrepare.cpp
  vendor/llvm/dist/lib/CodeGen/MachineFunction.cpp
  vendor/llvm/dist/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
  vendor/llvm/dist/lib/IR/Globals.cpp
  vendor/llvm/dist/lib/Target/AArch64/AArch64ISelLowering.cpp
  vendor/llvm/dist/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp
  vendor/llvm/dist/lib/Target/ARM/ARMISelLowering.cpp
  vendor/llvm/dist/lib/Target/X86/X86CallingConv.td
  vendor/llvm/dist/lib/Target/X86/X86FrameLowering.cpp
  vendor/llvm/dist/lib/Target/X86/X86ISelLowering.cpp
  vendor/llvm/dist/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
  vendor/llvm/dist/lib/Transforms/Utils/InlineFunction.cpp
  vendor/llvm/dist/lib/Transforms/Utils/Local.cpp
  vendor/llvm/dist/lib/Transforms/Utils/SimplifyLibCalls.cpp
  vendor/llvm/dist/test/CodeGen/AArch64/cxx-tlscc.ll
  vendor/llvm/dist/test/CodeGen/ARM/cxx-tlscc.ll
  vendor/llvm/dist/test/CodeGen/ARM/memfunc.ll
  vendor/llvm/dist/test/CodeGen/X86/cxx_tlscc64.ll
  vendor/llvm/dist/test/CodeGen/X86/x86-shrink-wrap-unwind.ll
  vendor/llvm/dist/test/ExecutionEngine/MCJIT/remote/cross-module-a.ll
  vendor/llvm/dist/test/ExecutionEngine/MCJIT/remote/multi-module-a.ll
  vendor/llvm/dist/test/ExecutionEngine/MCJIT/remote/simpletest-remote.ll
  vendor/llvm/dist/test/ExecutionEngine/MCJIT/remote/stubs-remote.ll
  vendor/llvm/dist/test/ExecutionEngine/MCJIT/remote/test-common-symbols-remote.ll
  vendor/llvm/dist/test/ExecutionEngine/MCJIT/remote/test-data-align-remote.ll
  vendor/llvm/dist/test/ExecutionEngine/MCJIT/remote/test-fp-no-external-funcs-remote.ll
  vendor/llvm/dist/test/ExecutionEngine/MCJIT/remote/test-global-init-nonzero-remote.ll
  vendor/llvm/dist/test/ExecutionEngine/MCJIT/remote/test-global-init-nonzero-sm-pic.ll
  vendor/llvm/dist/test/ExecutionEngine/MCJIT/remote/test-ptr-reloc-remote.ll
  vendor/llvm/dist/test/ExecutionEngine/MCJIT/remote/test-ptr-reloc-sm-pic.ll
  vendor/llvm/dist/test/ExecutionEngine/OrcMCJIT/remote/cross-module-a.ll
  vendor/llvm/dist/test/ExecutionEngine/OrcMCJIT/remote/multi-module-a.ll
  vendor/llvm/dist/test/ExecutionEngine/OrcMCJIT/remote/simpletest-remote.ll
  vendor/llvm/dist/test/ExecutionEngine/OrcMCJIT/remote/stubs-remote.ll
  vendor/llvm/dist/test/ExecutionEngine/OrcMCJIT/remote/test-common-symbols-remote.ll
  vendor/llvm/dist/test/ExecutionEngine/OrcMCJIT/remote/test-data-align-remote.ll
  vendor/llvm/dist/test/ExecutionEngine/OrcMCJIT/remote/test-fp-no-external-funcs-remote.ll
  vendor/llvm/dist/test/ExecutionEngine/OrcMCJIT/remote/test-global-init-nonzero-remote.ll
  vendor/llvm/dist/test/ExecutionEngine/OrcMCJIT/remote/test-global-init-nonzero-sm-pic.ll
  vendor/llvm/dist/test/ExecutionEngine/OrcMCJIT/remote/test-ptr-reloc-remote.ll
  vendor/llvm/dist/test/ExecutionEngine/OrcMCJIT/remote/test-ptr-reloc-sm-pic.ll
  vendor/llvm/dist/test/MC/AArch64/inst-directive.s
  vendor/llvm/dist/test/Transforms/InstCombine/cos-2.ll
  vendor/llvm/dist/test/Transforms/InstCombine/double-float-shrink-1.ll
  vendor/llvm/dist/tools/lli/lli.cpp
  vendor/llvm/dist/utils/release/test-release.sh

Modified: vendor/llvm/dist/include/llvm/CodeGen/MachineFunction.h
==============================================================================
--- vendor/llvm/dist/include/llvm/CodeGen/MachineFunction.h	Fri Jan 22 21:14:05 2016	(r294601)
+++ vendor/llvm/dist/include/llvm/CodeGen/MachineFunction.h	Fri Jan 22 21:16:09 2016	(r294602)
@@ -295,7 +295,7 @@ public:
   }
 
   /// Should we be emitting segmented stack stuff for the function
-  bool shouldSplitStack();
+  bool shouldSplitStack() const;
 
   /// getNumBlockIDs - Return the number of MBB ID's allocated.
   ///

Modified: vendor/llvm/dist/include/llvm/CodeGen/SelectionDAGNodes.h
==============================================================================
--- vendor/llvm/dist/include/llvm/CodeGen/SelectionDAGNodes.h	Fri Jan 22 21:14:05 2016	(r294601)
+++ vendor/llvm/dist/include/llvm/CodeGen/SelectionDAGNodes.h	Fri Jan 22 21:16:09 2016	(r294602)
@@ -369,6 +369,18 @@ public:
     (UnsafeAlgebra << 3) | (NoNaNs << 4) | (NoInfs << 5) |
     (NoSignedZeros << 6) | (AllowReciprocal << 7);
   }
+
+  /// Clear any flags in this flag set that aren't also set in Flags.
+  void intersectWith(const SDNodeFlags *Flags) {
+    NoUnsignedWrap &= Flags->NoUnsignedWrap;
+    NoSignedWrap &= Flags->NoSignedWrap;
+    Exact &= Flags->Exact;
+    UnsafeAlgebra &= Flags->UnsafeAlgebra;
+    NoNaNs &= Flags->NoNaNs;
+    NoInfs &= Flags->NoInfs;
+    NoSignedZeros &= Flags->NoSignedZeros;
+    AllowReciprocal &= Flags->AllowReciprocal;
+  }
 };
 
 /// Represents one node in the SelectionDAG.
@@ -682,6 +694,9 @@ public:
   /// and directly, but it is not to avoid creating a vtable for this class.
   const SDNodeFlags *getFlags() const;
 
+  /// Clear any flags in this node that aren't also set in Flags.
+  void intersectFlagsWith(const SDNodeFlags *Flags);
+
   /// Return the number of values defined/returned by this operator.
   unsigned getNumValues() const { return NumValues; }
 

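The intersectWith/intersectFlagsWith pair above exists because the flags are no longer folded into a node's FoldingSetNodeID (see the SelectionDAG.cpp hunk below): two nodes that differ only in their flags now CSE to a single node, and the surviving node may keep only the flags that hold for both uses. A minimal standalone sketch of that intersection, using plain bools rather than the real SDNodeFlags class:

    #include <cassert>

    // Simplified stand-in for SDNodeFlags; only two of the flags are shown.
    struct Flags {
      bool NoSignedWrap = false;
      bool NoUnsignedWrap = false;
      // Keep a flag only if it is set on both nodes being merged.
      void intersectWith(const Flags &Other) {
        NoSignedWrap &= Other.NoSignedWrap;
        NoUnsignedWrap &= Other.NoUnsignedWrap;
      }
    };

    int main() {
      Flags A; A.NoSignedWrap = true;   // e.g. an "add nsw"
      Flags B;                          // a plain "add" of the same operands
      A.intersectWith(B);               // the CSE'd node must drop nsw
      assert(!A.NoSignedWrap && !A.NoUnsignedWrap);
      return 0;
    }
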
Modified: vendor/llvm/dist/include/llvm/IR/GlobalValue.h
==============================================================================
--- vendor/llvm/dist/include/llvm/IR/GlobalValue.h	Fri Jan 22 21:14:05 2016	(r294601)
+++ vendor/llvm/dist/include/llvm/IR/GlobalValue.h	Fri Jan 22 21:16:09 2016	(r294602)
@@ -346,6 +346,10 @@ public:
     return !(isDeclarationForLinker() || isWeakForLinker());
   }
 
+  // Returns true if the alignment of the value can be unilaterally
+  // increased.
+  bool canIncreaseAlignment() const;
+
   /// This method unlinks 'this' from the containing module, but does not delete
   /// it.
   virtual void removeFromParent() = 0;

Modified: vendor/llvm/dist/include/llvm/Transforms/Utils/Local.h
==============================================================================
--- vendor/llvm/dist/include/llvm/Transforms/Utils/Local.h	Fri Jan 22 21:14:05 2016	(r294601)
+++ vendor/llvm/dist/include/llvm/Transforms/Utils/Local.h	Fri Jan 22 21:16:09 2016	(r294602)
@@ -331,6 +331,25 @@ unsigned replaceDominatedUsesWith(Value 
 /// during lowering by the GC infrastructure.
 bool callsGCLeafFunction(ImmutableCallSite CS);
 
+//===----------------------------------------------------------------------===//
+//  Intrinsic pattern matching
+//
+
+/// Try and match a bitreverse or bswap idiom.
+///
+/// If an idiom is matched, an intrinsic call is inserted before \c I. Any added
+/// instructions are returned in \c InsertedInsts. They will all have been added
+/// to a basic block.
+///
+/// A bitreverse idiom normally requires around 2*BW nodes to be searched (where
+/// BW is the bitwidth of the integer type). A bswap idiom requires anywhere up
+/// to BW / 4 nodes to be searched, so is significantly faster.
+///
+/// This function returns true on a successful match or false otherwise.
+bool recognizeBitReverseOrBSwapIdiom(
+    Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
+    SmallVectorImpl<Instruction *> &InsertedInsts);
+
 } // End llvm namespace
 
 #endif

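For illustration only (this example is not part of the commit): the kind of source-level idiom the matcher is aimed at is a hand-written byte or bit reversal built from shifts, masks and ORs, which reaches the IR as the tree of nodes the function walks. A hypothetical 32-bit case:

    #include <cassert>
    #include <cstdint>

    // A hand-rolled 32-bit byte swap. In IR this is a tree of shl/lshr/and/or
    // nodes; recognizeBitReverseOrBSwapIdiom (with MatchBSwaps set) can
    // replace the whole tree with a single llvm.bswap.i32 intrinsic call.
    uint32_t manual_bswap32(uint32_t X) {
      return ((X & 0x000000FFu) << 24) |
             ((X & 0x0000FF00u) <<  8) |
             ((X & 0x00FF0000u) >>  8) |
             ((X & 0xFF000000u) >> 24);
    }

    int main() {
      assert(manual_bswap32(0x11223344u) == 0x44332211u);
      return 0;
    }
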
Modified: vendor/llvm/dist/include/llvm/Transforms/Utils/SimplifyLibCalls.h
==============================================================================
--- vendor/llvm/dist/include/llvm/Transforms/Utils/SimplifyLibCalls.h	Fri Jan 22 21:14:05 2016	(r294601)
+++ vendor/llvm/dist/include/llvm/Transforms/Utils/SimplifyLibCalls.h	Fri Jan 22 21:16:09 2016	(r294602)
@@ -125,8 +125,6 @@ private:
   Value *optimizeStringMemoryLibCall(CallInst *CI, IRBuilder<> &B);
 
   // Math Library Optimizations
-  Value *optimizeUnaryDoubleFP(CallInst *CI, IRBuilder<> &B, bool CheckRetType);
-  Value *optimizeBinaryDoubleFP(CallInst *CI, IRBuilder<> &B);
   Value *optimizeCos(CallInst *CI, IRBuilder<> &B);
   Value *optimizePow(CallInst *CI, IRBuilder<> &B);
   Value *optimizeExp2(CallInst *CI, IRBuilder<> &B);

Modified: vendor/llvm/dist/lib/CodeGen/AsmPrinter/DebugLocEntry.h
==============================================================================
--- vendor/llvm/dist/lib/CodeGen/AsmPrinter/DebugLocEntry.h	Fri Jan 22 21:14:05 2016	(r294601)
+++ vendor/llvm/dist/lib/CodeGen/AsmPrinter/DebugLocEntry.h	Fri Jan 22 21:16:09 2016	(r294602)
@@ -93,18 +93,7 @@ public:
   /// variable, merge them by appending Next's values to the current
   /// list of values.
   /// Return true if the merge was successful.
-  bool MergeValues(const DebugLocEntry &Next) {
-    if (Begin == Next.Begin) {
-      auto *Expr = cast_or_null<DIExpression>(Values[0].Expression);
-      auto *NextExpr = cast_or_null<DIExpression>(Next.Values[0].Expression);
-      if (Expr->isBitPiece() && NextExpr->isBitPiece()) {
-        addValues(Next.Values);
-        End = Next.End;
-        return true;
-      }
-    }
-    return false;
-  }
+  bool MergeValues(const DebugLocEntry &Next);
 
   /// \brief Attempt to merge this DebugLocEntry with Next and return
   /// true if the merge was successful. Entries can be merged if they

Modified: vendor/llvm/dist/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
==============================================================================
--- vendor/llvm/dist/lib/CodeGen/AsmPrinter/DwarfDebug.cpp	Fri Jan 22 21:14:05 2016	(r294601)
+++ vendor/llvm/dist/lib/CodeGen/AsmPrinter/DwarfDebug.cpp	Fri Jan 22 21:16:09 2016	(r294602)
@@ -805,6 +805,24 @@ static bool piecesOverlap(const DIExpres
   return (l1 < r2) && (l2 < r1);
 }
 
+/// \brief If this and Next are describing different pieces of the same
+/// variable, merge them by appending Next's values to the current
+/// list of values.
+/// Return true if the merge was successful.
+bool DebugLocEntry::MergeValues(const DebugLocEntry &Next) {
+  if (Begin == Next.Begin) {
+    auto *Expr = cast_or_null<DIExpression>(Values[0].Expression);
+    auto *NextExpr = cast_or_null<DIExpression>(Next.Values[0].Expression);
+    if (Expr->isBitPiece() && NextExpr->isBitPiece() &&
+        !piecesOverlap(Expr, NextExpr)) {
+      addValues(Next.Values);
+      End = Next.End;
+      return true;
+    }
+  }
+  return false;
+}
+
 /// Build the location list for all DBG_VALUEs in the function that
 /// describe the same variable.  If the ranges of several independent
 /// pieces of the same variable overlap partially, split them up and

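The new !piecesOverlap(Expr, NextExpr) guard is the substantive part of this change (the body moves out of the header, apparently so it can call the file-static piecesOverlap helper), and it is presumably what the new test/DebugInfo/ARM/PR26163.ll exercises. The overlap test itself, visible in context above as (l1 < r2) && (l2 < r1), is the usual half-open interval check. A standalone sketch with explicit offsets and sizes in place of DIExpressions:

    #include <cassert>

    // Two bit pieces [Off1, Off1+Size1) and [Off2, Off2+Size2) of the same
    // variable overlap iff each piece starts before the other one ends.
    static bool bitPiecesOverlap(unsigned Off1, unsigned Size1,
                                 unsigned Off2, unsigned Size2) {
      return Off1 < Off2 + Size2 && Off2 < Off1 + Size1;
    }

    int main() {
      assert(!bitPiecesOverlap(0, 32, 32, 32)); // disjoint: safe to merge
      assert(bitPiecesOverlap(0, 32, 16, 32));  // overlapping: keep separate
      return 0;
    }
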
Modified: vendor/llvm/dist/lib/CodeGen/CodeGenPrepare.cpp
==============================================================================
--- vendor/llvm/dist/lib/CodeGen/CodeGenPrepare.cpp	Fri Jan 22 21:14:05 2016	(r294601)
+++ vendor/llvm/dist/lib/CodeGen/CodeGenPrepare.cpp	Fri Jan 22 21:16:09 2016	(r294602)
@@ -1742,8 +1742,8 @@ bool CodeGenPrepare::optimizeCallInst(Ca
       // over-aligning global variables that have an explicit section is
       // forbidden.
       GlobalVariable *GV;
-      if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->hasUniqueInitializer() &&
-          !GV->hasSection() && GV->getAlignment() < PrefAlign &&
+      if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() &&
+          GV->getAlignment() < PrefAlign &&
           DL->getTypeAllocSize(GV->getType()->getElementType()) >=
               MinSize + Offset2)
         GV->setAlignment(PrefAlign);
@@ -5211,6 +5211,24 @@ bool CodeGenPrepare::optimizeInst(Instru
   return false;
 }
 
+/// Given an OR instruction, check to see if this is a bitreverse
+/// idiom. If so, insert the new intrinsic and return true.
+static bool makeBitReverse(Instruction &I, const DataLayout &DL,
+                           const TargetLowering &TLI) {
+  if (!I.getType()->isIntegerTy() ||
+      !TLI.isOperationLegalOrCustom(ISD::BITREVERSE,
+                                    TLI.getValueType(DL, I.getType(), true)))
+    return false;
+
+  SmallVector<Instruction*, 4> Insts;
+  if (!recognizeBitReverseOrBSwapIdiom(&I, false, true, Insts))
+    return false;
+  Instruction *LastInst = Insts.back();
+  I.replaceAllUsesWith(LastInst);
+  RecursivelyDeleteTriviallyDeadInstructions(&I);
+  return true;
+}
+
 // In this pass we look for GEP and cast instructions that are used
 // across basic blocks and rewrite them to improve basic-block-at-a-time
 // selection.
@@ -5224,8 +5242,19 @@ bool CodeGenPrepare::optimizeBlock(Basic
     if (ModifiedDT)
       return true;
   }
-  MadeChange |= dupRetToEnableTailCallOpts(&BB);
 
+  bool MadeBitReverse = true;
+  while (TLI && MadeBitReverse) {
+    MadeBitReverse = false;
+    for (auto &I : reverse(BB)) {
+      if (makeBitReverse(I, *DL, *TLI)) {
+        MadeBitReverse = MadeChange = true;
+        break;
+      }
+    }
+  }
+  MadeChange |= dupRetToEnableTailCallOpts(&BB);
+  
   return MadeChange;
 }
 

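The while (TLI && MadeBitReverse) loop above restarts the backward scan of the block after every successful rewrite rather than continuing in place, since makeBitReverse erases the matched instruction together with any operands that become trivially dead; iterating until a full pass changes nothing gives a simple fixed point. The same restart-until-fixed-point shape, sketched on a plain container purely for illustration:

    #include <cassert>
    #include <list>

    // Mirror of the MadeBitReverse loop: rewrite (here: erase) one element,
    // restart the pass, and stop once a whole pass makes no change.
    static void dropEvens(std::list<int> &L) {
      bool Changed = true;
      while (Changed) {
        Changed = false;
        for (auto It = L.rbegin(); It != L.rend(); ++It) {
          if (*It % 2 == 0) {
            L.erase(std::next(It).base()); // erase the element *It refers to
            Changed = true;
            break;                         // restart the scan from the end
          }
        }
      }
    }

    int main() {
      std::list<int> L{1, 2, 3, 4, 5};
      dropEvens(L);
      assert(L == std::list<int>({1, 3, 5}));
      return 0;
    }
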
Modified: vendor/llvm/dist/lib/CodeGen/MachineFunction.cpp
==============================================================================
--- vendor/llvm/dist/lib/CodeGen/MachineFunction.cpp	Fri Jan 22 21:14:05 2016	(r294601)
+++ vendor/llvm/dist/lib/CodeGen/MachineFunction.cpp	Fri Jan 22 21:16:09 2016	(r294602)
@@ -163,7 +163,7 @@ getOrCreateJumpTableInfo(unsigned EntryK
 }
 
 /// Should we be emitting segmented stack stuff for the function
-bool MachineFunction::shouldSplitStack() {
+bool MachineFunction::shouldSplitStack() const {
   return getFunction()->hasFnAttribute("split-stack");
 }
 

Modified: vendor/llvm/dist/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
==============================================================================
--- vendor/llvm/dist/lib/CodeGen/SelectionDAG/SelectionDAG.cpp	Fri Jan 22 21:14:05 2016	(r294601)
+++ vendor/llvm/dist/lib/CodeGen/SelectionDAG/SelectionDAG.cpp	Fri Jan 22 21:16:09 2016	(r294602)
@@ -377,22 +377,6 @@ static void AddNodeIDOperands(FoldingSet
   }
 }
 
-/// Add logical or fast math flag values to FoldingSetNodeID value.
-static void AddNodeIDFlags(FoldingSetNodeID &ID, unsigned Opcode,
-                           const SDNodeFlags *Flags) {
-  if (!isBinOpWithFlags(Opcode))
-    return;
-
-  unsigned RawFlags = 0;
-  if (Flags)
-    RawFlags = Flags->getRawFlags();
-  ID.AddInteger(RawFlags);
-}
-
-static void AddNodeIDFlags(FoldingSetNodeID &ID, const SDNode *N) {
-  AddNodeIDFlags(ID, N->getOpcode(), N->getFlags());
-}
-
 static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
                           SDVTList VTList, ArrayRef<SDValue> OpList) {
   AddNodeIDOpcode(ID, OpC);
@@ -528,8 +512,6 @@ static void AddNodeIDCustom(FoldingSetNo
   }
   } // end switch (N->getOpcode())
 
-  AddNodeIDFlags(ID, N);
-
   // Target specific memory nodes could also have address spaces to check.
   if (N->isTargetMemoryOpcode())
     ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
@@ -851,6 +833,9 @@ SDNode *SelectionDAG::FindModifiedNodeSl
   AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
   AddNodeIDCustom(ID, N);
   SDNode *Node = FindNodeOrInsertPos(ID, N->getDebugLoc(), InsertPos);
+  if (Node)
+    if (const SDNodeFlags *Flags = N->getFlags())
+      Node->intersectFlagsWith(Flags);
   return Node;
 }
 
@@ -869,6 +854,9 @@ SDNode *SelectionDAG::FindModifiedNodeSl
   AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
   AddNodeIDCustom(ID, N);
   SDNode *Node = FindNodeOrInsertPos(ID, N->getDebugLoc(), InsertPos);
+  if (Node)
+    if (const SDNodeFlags *Flags = N->getFlags())
+      Node->intersectFlagsWith(Flags);
   return Node;
 }
 
@@ -886,6 +874,9 @@ SDNode *SelectionDAG::FindModifiedNodeSl
   AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
   AddNodeIDCustom(ID, N);
   SDNode *Node = FindNodeOrInsertPos(ID, N->getDebugLoc(), InsertPos);
+  if (Node)
+    if (const SDNodeFlags *Flags = N->getFlags())
+      Node->intersectFlagsWith(Flags);
   return Node;
 }
 
@@ -3892,10 +3883,12 @@ SDValue SelectionDAG::getNode(unsigned O
     SDValue Ops[] = {N1, N2};
     FoldingSetNodeID ID;
     AddNodeIDNode(ID, Opcode, VTs, Ops);
-    AddNodeIDFlags(ID, Opcode, Flags);
     void *IP = nullptr;
-    if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP))
+    if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP)) {
+      if (Flags)
+        E->intersectFlagsWith(Flags);
       return SDValue(E, 0);
+    }
 
     N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, Flags);
 
@@ -6249,10 +6242,12 @@ SDNode *SelectionDAG::getNodeIfExists(un
   if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
     FoldingSetNodeID ID;
     AddNodeIDNode(ID, Opcode, VTList, Ops);
-    AddNodeIDFlags(ID, Opcode, Flags);
     void *IP = nullptr;
-    if (SDNode *E = FindNodeOrInsertPos(ID, DebugLoc(), IP))
+    if (SDNode *E = FindNodeOrInsertPos(ID, DebugLoc(), IP)) {
+      if (Flags)
+        E->intersectFlagsWith(Flags);
       return E;
+    }
   }
   return nullptr;
 }
@@ -6948,6 +6943,11 @@ const SDNodeFlags *SDNode::getFlags() co
   return nullptr;
 }
 
+void SDNode::intersectFlagsWith(const SDNodeFlags *Flags) {
+  if (auto *FlagsNode = dyn_cast<BinaryWithFlagsSDNode>(this))
+    FlagsNode->Flags.intersectWith(Flags);
+}
+
 SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
   assert(N->getNumValues() == 1 &&
          "Can't unroll a vector with multiple results!");

Modified: vendor/llvm/dist/lib/IR/Globals.cpp
==============================================================================
--- vendor/llvm/dist/lib/IR/Globals.cpp	Fri Jan 22 21:14:05 2016	(r294601)
+++ vendor/llvm/dist/lib/IR/Globals.cpp	Fri Jan 22 21:16:09 2016	(r294602)
@@ -12,11 +12,12 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "llvm/IR/GlobalValue.h"
 #include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/Triple.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/DerivedTypes.h"
 #include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalValue.h"
 #include "llvm/IR/GlobalVariable.h"
 #include "llvm/IR/Module.h"
 #include "llvm/IR/Operator.h"
@@ -134,6 +135,47 @@ bool GlobalValue::isDeclaration() const 
   return false;
 }
 
+bool GlobalValue::canIncreaseAlignment() const {
+  // Firstly, can only increase the alignment of a global if it
+  // is a strong definition.
+  if (!isStrongDefinitionForLinker())
+    return false;
+
+  // It also has to either not have a section defined, or, not have
+  // alignment specified. (If it is assigned a section, the global
+  // could be densely packed with other objects in the section, and
+  // increasing the alignment could cause padding issues.)
+  if (hasSection() && getAlignment() > 0)
+    return false;
+
+  // On ELF platforms, we're further restricted in that we can't
+  // increase the alignment of any variable which might be emitted
+  // into a shared library, and which is exported. If the main
+  // executable accesses a variable found in a shared-lib, the main
+  // exe actually allocates memory for and exports the symbol ITSELF,
+  // overriding the symbol found in the library. That is, at link
+  // time, the observed alignment of the variable is copied into the
+  // executable binary. (A COPY relocation is also generated, to copy
+  // the initial data from the shadowed variable in the shared-lib
+  // into the location in the main binary, before running code.)
+  //
+  // And thus, even though you might think you are defining the
+  // global, and allocating the memory for the global in your object
+  // file, and thus should be able to set the alignment arbitrarily,
+  // that's not actually true. Doing so can cause an ABI breakage; an
+  // executable might have already been built with the previous
+  // alignment of the variable, and then assuming an increased
+  // alignment will be incorrect.
+
+  // Conservatively assume ELF if there's no parent pointer.
+  bool isELF =
+      (!Parent || Triple(Parent->getTargetTriple()).isOSBinFormatELF());
+  if (isELF && hasDefaultVisibility() && !hasLocalLinkage())
+    return false;
+
+  return true;
+}
+
 //===----------------------------------------------------------------------===//
 // GlobalVariable Implementation
 //===----------------------------------------------------------------------===//

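To make the ELF restriction described above concrete, here is a hypothetical (made-up) scenario, not anything from this commit. A shared library defines a default-visibility global; an executable that references it gets a COPY relocation, so the executable allocates its own copy at whatever alignment was recorded when it was linked, shadowing the library's definition. If a later rebuild of the library silently raised the global's alignment, code in the new library could assume an alignment the executable's copy does not have. Hence canIncreaseAlignment() refuses for default-visibility, non-local globals on ELF. The library side of such a scenario is nothing more exotic than:

    // libfoo.cpp -- hypothetical shared-library source. exported_counter has
    // default visibility and external linkage, so an executable built against
    // libfoo.so may shadow it via a COPY relocation; its alignment is part of
    // the ABI and must not be raised unilaterally by the optimizer.
    #include <cstdio>

    int exported_counter = 0;

    void bump() {
      ++exported_counter;
      std::printf("counter = %d\n", exported_counter);
    }
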
Modified: vendor/llvm/dist/lib/Target/AArch64/AArch64ISelLowering.cpp
==============================================================================
--- vendor/llvm/dist/lib/Target/AArch64/AArch64ISelLowering.cpp	Fri Jan 22 21:14:05 2016	(r294601)
+++ vendor/llvm/dist/lib/Target/AArch64/AArch64ISelLowering.cpp	Fri Jan 22 21:16:09 2016	(r294602)
@@ -10133,6 +10133,7 @@ void AArch64TargetLowering::insertCopies
 
   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
   MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
+  MachineBasicBlock::iterator MBBI = Entry->begin();
   for (const MCPhysReg *I = IStart; *I; ++I) {
     const TargetRegisterClass *RC = nullptr;
     if (AArch64::GPR64RegClass.contains(*I))
@@ -10152,13 +10153,13 @@ void AArch64TargetLowering::insertCopies
                Attribute::NoUnwind) &&
            "Function should be nounwind in insertCopiesSplitCSR!");
     Entry->addLiveIn(*I);
-    BuildMI(*Entry, Entry->begin(), DebugLoc(), TII->get(TargetOpcode::COPY),
-            NewVR)
+    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
         .addReg(*I);
 
+    // Insert the copy-back instructions right before the terminator.
     for (auto *Exit : Exits)
-      BuildMI(*Exit, Exit->begin(), DebugLoc(), TII->get(TargetOpcode::COPY),
-              *I)
+      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
+              TII->get(TargetOpcode::COPY), *I)
           .addReg(NewVR);
   }
 }

Modified: vendor/llvm/dist/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp
==============================================================================
--- vendor/llvm/dist/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp	Fri Jan 22 21:14:05 2016	(r294601)
+++ vendor/llvm/dist/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp	Fri Jan 22 21:16:09 2016	(r294602)
@@ -112,9 +112,21 @@ public:
     MCELFStreamer::EmitInstruction(Inst, STI);
   }
 
+  /// Emit a 32-bit value as an instruction. This is only used for the .inst
+  /// directive, EmitInstruction should be used in other cases.
   void emitInst(uint32_t Inst) {
+    char Buffer[4];
+
+    // We can't just use EmitIntValue here, as that will emit a data mapping
+    // symbol, and swap the endianness on big-endian systems (instructions are
+    // always little-endian).
+    for (unsigned I = 0; I < 4; ++I) {
+      Buffer[I] = uint8_t(Inst);
+      Inst >>= 8;
+    }
+
     EmitA64MappingSymbol();
-    MCELFStreamer::EmitIntValue(Inst, 4);
+    MCELFStreamer::EmitBytes(StringRef(Buffer, 4));
   }
 
   /// This is one of the functions used to emit data into an ELF section, so the

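The manual byte-splitting in emitInst() above is needed because A64 instructions are always stored little-endian, even when the target's data endianness is big-endian, and (as the new comment notes) EmitIntValue would both byte-swap on such targets and emit a data mapping symbol. A standalone sketch of just the encoding step, separate from the streamer:

    #include <cassert>
    #include <cstdint>

    // Serialize a 32-bit A64 instruction word least-significant byte first,
    // independent of host and target data endianness.
    static void encodeInstLE(uint32_t Inst, char Buffer[4]) {
      for (unsigned I = 0; I < 4; ++I) {
        Buffer[I] = static_cast<char>(Inst & 0xFF);
        Inst >>= 8;
      }
    }

    int main() {
      char Buf[4];
      encodeInstLE(0xD503201F, Buf); // the A64 NOP encoding
      assert(static_cast<uint8_t>(Buf[0]) == 0x1F);
      assert(static_cast<uint8_t>(Buf[3]) == 0xD5);
      return 0;
    }
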
Modified: vendor/llvm/dist/lib/Target/ARM/ARMISelLowering.cpp
==============================================================================
--- vendor/llvm/dist/lib/Target/ARM/ARMISelLowering.cpp	Fri Jan 22 21:14:05 2016	(r294601)
+++ vendor/llvm/dist/lib/Target/ARM/ARMISelLowering.cpp	Fri Jan 22 21:16:09 2016	(r294602)
@@ -12423,6 +12423,7 @@ void ARMTargetLowering::insertCopiesSpli
 
   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
   MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
+  MachineBasicBlock::iterator MBBI = Entry->begin();
   for (const MCPhysReg *I = IStart; *I; ++I) {
     const TargetRegisterClass *RC = nullptr;
     if (ARM::GPRRegClass.contains(*I))
@@ -12442,13 +12443,13 @@ void ARMTargetLowering::insertCopiesSpli
                Attribute::NoUnwind) &&
            "Function should be nounwind in insertCopiesSplitCSR!");
     Entry->addLiveIn(*I);
-    BuildMI(*Entry, Entry->begin(), DebugLoc(), TII->get(TargetOpcode::COPY),
-            NewVR)
+    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
         .addReg(*I);
 
+    // Insert the copy-back instructions right before the terminator.
     for (auto *Exit : Exits)
-      BuildMI(*Exit, Exit->begin(), DebugLoc(), TII->get(TargetOpcode::COPY),
-              *I)
+      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
+              TII->get(TargetOpcode::COPY), *I)
           .addReg(NewVR);
   }
 }

Modified: vendor/llvm/dist/lib/Target/X86/X86CallingConv.td
==============================================================================
--- vendor/llvm/dist/lib/Target/X86/X86CallingConv.td	Fri Jan 22 21:14:05 2016	(r294601)
+++ vendor/llvm/dist/lib/Target/X86/X86CallingConv.td	Fri Jan 22 21:16:09 2016	(r294602)
@@ -832,10 +832,10 @@ def CSR_64_TLS_Darwin : CalleeSavedRegs<
                                              R8, R9, R10, R11)>;
 
 // CSRs that are handled by prologue, epilogue.
-def CSR_64_CXX_TLS_Darwin_PE : CalleeSavedRegs<(add)>;
+def CSR_64_CXX_TLS_Darwin_PE : CalleeSavedRegs<(add RBP)>;
 
 // CSRs that are handled explicitly via copies.
-def CSR_64_CXX_TLS_Darwin_ViaCopy : CalleeSavedRegs<(add CSR_64_TLS_Darwin)>;
+def CSR_64_CXX_TLS_Darwin_ViaCopy : CalleeSavedRegs<(sub CSR_64_TLS_Darwin, RBP)>;
 
 // All GPRs - except r11
 def CSR_64_RT_MostRegs : CalleeSavedRegs<(add CSR_64, RAX, RCX, RDX, RSI, RDI,

Modified: vendor/llvm/dist/lib/Target/X86/X86FrameLowering.cpp
==============================================================================
--- vendor/llvm/dist/lib/Target/X86/X86FrameLowering.cpp	Fri Jan 22 21:14:05 2016	(r294601)
+++ vendor/llvm/dist/lib/Target/X86/X86FrameLowering.cpp	Fri Jan 22 21:16:09 2016	(r294602)
@@ -2031,6 +2031,10 @@ void X86FrameLowering::adjustForSegmente
   unsigned TlsReg, TlsOffset;
   DebugLoc DL;
 
+  // To support shrink-wrapping we would need to insert the new blocks
+  // at the right place and update the branches to PrologueMBB.
+  assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet");
+
   unsigned ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
   assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
          "Scratch register is live-in");
@@ -2271,6 +2275,11 @@ void X86FrameLowering::adjustForHiPEProl
     MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
   MachineFrameInfo *MFI = MF.getFrameInfo();
   DebugLoc DL;
+
+  // To support shrink-wrapping we would need to insert the new blocks
+  // at the right place and update the branches to PrologueMBB.
+  assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet");
+
   // HiPE-specific values
   const unsigned HipeLeafWords = 24;
   const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;
@@ -2584,7 +2593,14 @@ bool X86FrameLowering::canUseAsEpilogue(
 bool X86FrameLowering::enableShrinkWrapping(const MachineFunction &MF) const {
   // If we may need to emit frameless compact unwind information, give
   // up as this is currently broken: PR25614.
-  return MF.getFunction()->hasFnAttribute(Attribute::NoUnwind) || hasFP(MF);
+  return (MF.getFunction()->hasFnAttribute(Attribute::NoUnwind) || hasFP(MF)) &&
+         // The lowering of segmented stack and HiPE only support entry blocks
+         // as prologue blocks: PR26107.
+         // This limitation may be lifted if we fix:
+         // - adjustForSegmentedStacks
+         // - adjustForHiPEPrologue
+         MF.getFunction()->getCallingConv() != CallingConv::HiPE &&
+         !MF.shouldSplitStack();
 }
 
 MachineBasicBlock::iterator X86FrameLowering::restoreWin32EHStackPointers(

Modified: vendor/llvm/dist/lib/Target/X86/X86ISelLowering.cpp
==============================================================================
--- vendor/llvm/dist/lib/Target/X86/X86ISelLowering.cpp	Fri Jan 22 21:14:05 2016	(r294601)
+++ vendor/llvm/dist/lib/Target/X86/X86ISelLowering.cpp	Fri Jan 22 21:16:09 2016	(r294602)
@@ -28908,6 +28908,7 @@ void X86TargetLowering::insertCopiesSpli
 
   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
   MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
+  MachineBasicBlock::iterator MBBI = Entry->begin();
   for (const MCPhysReg *I = IStart; *I; ++I) {
     const TargetRegisterClass *RC = nullptr;
     if (X86::GR64RegClass.contains(*I))
@@ -28925,13 +28926,13 @@ void X86TargetLowering::insertCopiesSpli
                Attribute::NoUnwind) &&
            "Function should be nounwind in insertCopiesSplitCSR!");
     Entry->addLiveIn(*I);
-    BuildMI(*Entry, Entry->begin(), DebugLoc(), TII->get(TargetOpcode::COPY),
-            NewVR)
+    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
         .addReg(*I);
 
+    // Insert the copy-back instructions right before the terminator.
     for (auto *Exit : Exits)
-      BuildMI(*Exit, Exit->begin(), DebugLoc(), TII->get(TargetOpcode::COPY),
-              *I)
+      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
+              TII->get(TargetOpcode::COPY), *I)
           .addReg(NewVR);
   }
 }

Modified: vendor/llvm/dist/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
==============================================================================
--- vendor/llvm/dist/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp	Fri Jan 22 21:14:05 2016	(r294601)
+++ vendor/llvm/dist/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp	Fri Jan 22 21:16:09 2016	(r294602)
@@ -17,6 +17,7 @@
 #include "llvm/IR/Intrinsics.h"
 #include "llvm/IR/PatternMatch.h"
 #include "llvm/Transforms/Utils/CmpInstAnalysis.h"
+#include "llvm/Transforms/Utils/Local.h"
 using namespace llvm;
 using namespace PatternMatch;
 
@@ -1565,190 +1566,18 @@ Instruction *InstCombiner::visitAnd(Bina
   return Changed ? &I : nullptr;
 }
 
-
-/// Analyze the specified subexpression and see if it is capable of providing
-/// pieces of a bswap or bitreverse. The subexpression provides a potential
-/// piece of a bswap or bitreverse if it can be proven that each non-zero bit in
-/// the output of the expression came from a corresponding bit in some other
-/// value. This function is recursive, and the end result is a mapping of
-/// (value, bitnumber) to bitnumber. It is the caller's responsibility to
-/// validate that all `value`s are identical and that the bitnumber to bitnumber
-/// mapping is correct for a bswap or bitreverse.
-///
-/// For example, if the current subexpression if "(shl i32 %X, 24)" then we know
-/// that the expression deposits the low byte of %X into the high byte of the
-/// result and that all other bits are zero. This expression is accepted,
-/// BitValues[24-31] are set to %X and BitProvenance[24-31] are set to [0-7].
-///
-/// This function returns true if the match was unsuccessful and false if so.
-/// On entry to the function the "OverallLeftShift" is a signed integer value
-/// indicating the number of bits that the subexpression is later shifted.  For
-/// example, if the expression is later right shifted by 16 bits, the
-/// OverallLeftShift value would be -16 on entry.  This is used to specify which
-/// bits of BitValues are actually being set.
-///
-/// Similarly, BitMask is a bitmask where a bit is clear if its corresponding
-/// bit is masked to zero by a user.  For example, in (X & 255), X will be
-/// processed with a bytemask of 255. BitMask is always in the local
-/// (OverallLeftShift) coordinate space.
-///
-static bool CollectBitParts(Value *V, int OverallLeftShift, APInt BitMask,
-                            SmallVectorImpl<Value *> &BitValues,
-                            SmallVectorImpl<int> &BitProvenance) {
-  if (Instruction *I = dyn_cast<Instruction>(V)) {
-    // If this is an or instruction, it may be an inner node of the bswap.
-    if (I->getOpcode() == Instruction::Or)
-      return CollectBitParts(I->getOperand(0), OverallLeftShift, BitMask,
-                             BitValues, BitProvenance) ||
-             CollectBitParts(I->getOperand(1), OverallLeftShift, BitMask,
-                             BitValues, BitProvenance);
-
-    // If this is a logical shift by a constant, recurse with OverallLeftShift
-    // and BitMask adjusted.
-    if (I->isLogicalShift() && isa<ConstantInt>(I->getOperand(1))) {
-      unsigned ShAmt =
-          cast<ConstantInt>(I->getOperand(1))->getLimitedValue(~0U);
-      // Ensure the shift amount is defined.
-      if (ShAmt > BitValues.size())
-        return true;
-
-      unsigned BitShift = ShAmt;
-      if (I->getOpcode() == Instruction::Shl) {
-        // X << C -> collect(X, +C)
-        OverallLeftShift += BitShift;
-        BitMask = BitMask.lshr(BitShift);
-      } else {
-        // X >>u C -> collect(X, -C)
-        OverallLeftShift -= BitShift;
-        BitMask = BitMask.shl(BitShift);
-      }
-
-      if (OverallLeftShift >= (int)BitValues.size())
-        return true;
-      if (OverallLeftShift <= -(int)BitValues.size())
-        return true;
-
-      return CollectBitParts(I->getOperand(0), OverallLeftShift, BitMask,
-                             BitValues, BitProvenance);
-    }
-
-    // If this is a logical 'and' with a mask that clears bits, clear the
-    // corresponding bits in BitMask.
-    if (I->getOpcode() == Instruction::And &&
-        isa<ConstantInt>(I->getOperand(1))) {
-      unsigned NumBits = BitValues.size();
-      APInt Bit(I->getType()->getPrimitiveSizeInBits(), 1);
-      const APInt &AndMask = cast<ConstantInt>(I->getOperand(1))->getValue();
-
-      for (unsigned i = 0; i != NumBits; ++i, Bit <<= 1) {
-        // If this bit is masked out by a later operation, we don't care what
-        // the and mask is.
-        if (BitMask[i] == 0)
-          continue;
-
-        // If the AndMask is zero for this bit, clear the bit.
-        APInt MaskB = AndMask & Bit;
-        if (MaskB == 0) {
-          BitMask.clearBit(i);
-          continue;
-        }
-
-        // Otherwise, this bit is kept.
-      }
-
-      return CollectBitParts(I->getOperand(0), OverallLeftShift, BitMask,
-                             BitValues, BitProvenance);
-    }
-  }
-
-  // Okay, we got to something that isn't a shift, 'or' or 'and'.  This must be
-  // the input value to the bswap/bitreverse. To be part of a bswap or
-  // bitreverse we must be demanding a contiguous range of bits from it.
-  unsigned InputBitLen = BitMask.countPopulation();
-  unsigned InputBitNo = BitMask.countTrailingZeros();
-  if (BitMask.getBitWidth() - BitMask.countLeadingZeros() - InputBitNo !=
-      InputBitLen)
-    // Not a contiguous set range of bits!
-    return true;
-
-  // We know we're moving a contiguous range of bits from the input to the
-  // output. Record which bits in the output came from which bits in the input.
-  unsigned DestBitNo = InputBitNo + OverallLeftShift;
-  for (unsigned I = 0; I < InputBitLen; ++I)
-    BitProvenance[DestBitNo + I] = InputBitNo + I;
-
-  // If the destination bit value is already defined, the values are or'd
-  // together, which isn't a bswap/bitreverse (unless it's an or of the same
-  // bits).
-  if (BitValues[DestBitNo] && BitValues[DestBitNo] != V)
-    return true;
-  for (unsigned I = 0; I < InputBitLen; ++I)
-    BitValues[DestBitNo + I] = V;
-
-  return false;
-}
-
-static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To,
-                                          unsigned BitWidth) {
-  if (From % 8 != To % 8)
-    return false;
-  // Convert from bit indices to byte indices and check for a byte reversal.
-  From >>= 3;
-  To >>= 3;
-  BitWidth >>= 3;
-  return From == BitWidth - To - 1;
-}
-
-static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To,
-                                               unsigned BitWidth) {
-  return From == BitWidth - To - 1;
-}
-
 /// Given an OR instruction, check to see if this is a bswap or bitreverse
 /// idiom. If so, insert the new intrinsic and return it.
 Instruction *InstCombiner::MatchBSwapOrBitReverse(BinaryOperator &I) {
-  IntegerType *ITy = dyn_cast<IntegerType>(I.getType());
-  if (!ITy)
-    return nullptr;   // Can't do vectors.
-  unsigned BW = ITy->getBitWidth();
-  
-  /// We keep track of which bit (BitProvenance) inside which value (BitValues)
-  /// defines each bit in the result.
-  SmallVector<Value *, 8> BitValues(BW, nullptr);
-  SmallVector<int, 8> BitProvenance(BW, -1);
-  
-  // Try to find all the pieces corresponding to the bswap.
-  APInt BitMask = APInt::getAllOnesValue(BitValues.size());
-  if (CollectBitParts(&I, 0, BitMask, BitValues, BitProvenance))
-    return nullptr;
-
-  // Check to see if all of the bits come from the same value.
-  Value *V = BitValues[0];
-  if (!V) return nullptr;  // Didn't find a bit?  Must be zero.
-
-  if (!std::all_of(BitValues.begin(), BitValues.end(),
-                   [&](const Value *X) { return X == V; }))
-    return nullptr;
-
-  // Now, is the bit permutation correct for a bswap or a bitreverse? We can
-  // only byteswap values with an even number of bytes.
-  bool OKForBSwap = BW % 16 == 0, OKForBitReverse = true;;
-  for (unsigned i = 0, e = BitValues.size(); i != e; ++i) {
-    OKForBSwap &= bitTransformIsCorrectForBSwap(BitProvenance[i], i, BW);
-    OKForBitReverse &=
-        bitTransformIsCorrectForBitReverse(BitProvenance[i], i, BW);
-  }
-
-  Intrinsic::ID Intrin;
-  if (OKForBSwap)
-    Intrin = Intrinsic::bswap;
-  else if (OKForBitReverse)
-    Intrin = Intrinsic::bitreverse;
-  else
+  SmallVector<Instruction*, 4> Insts;
+  if (!recognizeBitReverseOrBSwapIdiom(&I, true, false, Insts))
     return nullptr;
+  Instruction *LastInst = Insts.pop_back_val();
+  LastInst->removeFromParent();
 
-  Function *F = Intrinsic::getDeclaration(I.getModule(), Intrin, ITy);
-  return CallInst::Create(F, V);
+  for (auto *Inst : Insts)
+    Worklist.Add(Inst);
+  return LastInst;
 }
 
 /// We have an expression of the form (A&C)|(B&D).  Check if A is (cond?-1:0)

Modified: vendor/llvm/dist/lib/Transforms/Utils/InlineFunction.cpp
==============================================================================
--- vendor/llvm/dist/lib/Transforms/Utils/InlineFunction.cpp	Fri Jan 22 21:14:05 2016	(r294601)
+++ vendor/llvm/dist/lib/Transforms/Utils/InlineFunction.cpp	Fri Jan 22 21:16:09 2016	(r294602)
@@ -179,13 +179,244 @@ void LandingPadInliningInfo::forwardResu
   RI->eraseFromParent();
 }
 
+/// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
+static Value *getParentPad(Value *EHPad) {
+  if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
+    return FPI->getParentPad();
+  return cast<CatchSwitchInst>(EHPad)->getParentPad();
+}
+
+typedef DenseMap<Instruction *, Value *> UnwindDestMemoTy;
+
+/// Helper for getUnwindDestToken that does the descendant-ward part of
+/// the search.
+static Value *getUnwindDestTokenHelper(Instruction *EHPad,
+                                       UnwindDestMemoTy &MemoMap) {
+  SmallVector<Instruction *, 8> Worklist(1, EHPad);
+
+  while (!Worklist.empty()) {
+    Instruction *CurrentPad = Worklist.pop_back_val();
+    // We only put pads on the worklist that aren't in the MemoMap.  When
+    // we find an unwind dest for a pad we may update its ancestors, but
+    // the queue only ever contains uncles/great-uncles/etc. of CurrentPad,
+    // so they should never get updated while queued on the worklist.
+    assert(!MemoMap.count(CurrentPad));
+    Value *UnwindDestToken = nullptr;
+    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
+      if (CatchSwitch->hasUnwindDest()) {
+        UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
+      } else {
+        // Catchswitch doesn't have a 'nounwind' variant, and one might be
+        // annotated as "unwinds to caller" when really it's nounwind (see
+        // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the
+        // parent's unwind dest from this.  We can check its catchpads'
+        // descendants, since they might include a cleanuppad with an
+        // "unwinds to caller" cleanupret, which can be trusted.
+        for (auto HI = CatchSwitch->handler_begin(),
+                  HE = CatchSwitch->handler_end();
+             HI != HE && !UnwindDestToken; ++HI) {
+          BasicBlock *HandlerBlock = *HI;
+          auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI());
+          for (User *Child : CatchPad->users()) {
+            // Intentionally ignore invokes here -- since the catchswitch is
+            // marked "unwind to caller", it would be a verifier error if it
+            // contained an invoke which unwinds out of it, so any invoke we'd
+            // encounter must unwind to some child of the catch.
+            if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
+              continue;
+
+            Instruction *ChildPad = cast<Instruction>(Child);
+            auto Memo = MemoMap.find(ChildPad);
+            if (Memo == MemoMap.end()) {
+              // Haven't figured out this child pad yet; queue it.
+              Worklist.push_back(ChildPad);
+              continue;
+            }
+            // We've already checked this child, but might have found that
+            // it offers no proof either way.
+            Value *ChildUnwindDestToken = Memo->second;
+            if (!ChildUnwindDestToken)
+              continue;
+            // We already know the child's unwind dest, which can either
+            // be ConstantTokenNone to indicate unwind to caller, or can
+            // be another child of the catchpad.  Only the former indicates
+            // the unwind dest of the catchswitch.
+            if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
+              UnwindDestToken = ChildUnwindDestToken;
+              break;
+            }
+            assert(getParentPad(ChildUnwindDestToken) == CatchPad);
+          }
+        }
+      }
+    } else {
+      auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
+      for (User *U : CleanupPad->users()) {
+        if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
+          if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
+            UnwindDestToken = RetUnwindDest->getFirstNonPHI();
+          else
+            UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
+          break;
+        }
+        Value *ChildUnwindDestToken;
+        if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
+          ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
+        } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
+          Instruction *ChildPad = cast<Instruction>(U);
+          auto Memo = MemoMap.find(ChildPad);
+          if (Memo == MemoMap.end()) {
+            // Haven't resolved this child yet; queue it and keep searching.
+            Worklist.push_back(ChildPad);
+            continue;
+          }
+          // We've checked this child, but still need to ignore it if it
+          // had no proof either way.
+          ChildUnwindDestToken = Memo->second;
+          if (!ChildUnwindDestToken)
+            continue;
+        } else {
+          // Not a relevant user of the cleanuppad
+          continue;
+        }
+        // In a well-formed program, the child/invoke must either unwind to
+        // an(other) child of the cleanup, or exit the cleanup.  In the
+        // first case, continue searching.
+        if (isa<Instruction>(ChildUnwindDestToken) &&
+            getParentPad(ChildUnwindDestToken) == CleanupPad)
+          continue;
+        UnwindDestToken = ChildUnwindDestToken;
+        break;
+      }
+    }
+    // If we haven't found an unwind dest for CurrentPad, we may have queued its
+    // children, so move on to the next in the worklist.
+    if (!UnwindDestToken)
+      continue;
+
+    // Now we know that CurrentPad unwinds to UnwindDestToken.  It also exits
+    // any ancestors of CurrentPad up to but not including UnwindDestToken's
+    // parent pad.  Record this in the memo map, and check to see if the
+    // original EHPad being queried is one of the ones exited.
+    Value *UnwindParent;
+    if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
+      UnwindParent = getParentPad(UnwindPad);
+    else
+      UnwindParent = nullptr;
+    bool ExitedOriginalPad = false;
+    for (Instruction *ExitedPad = CurrentPad;
+         ExitedPad && ExitedPad != UnwindParent;
+         ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
+      // Skip over catchpads since they just follow their catchswitches.
+      if (isa<CatchPadInst>(ExitedPad))
+        continue;
+      MemoMap[ExitedPad] = UnwindDestToken;
+      ExitedOriginalPad |= (ExitedPad == EHPad);
+    }
+
+    if (ExitedOriginalPad)
+      return UnwindDestToken;
+
+    // Continue the search.
+  }
+
+  // No definitive information is contained within this funclet.
+  return nullptr;
+}
+
+/// Given an EH pad, find where it unwinds.  If it unwinds to an EH pad,
+/// return that pad instruction.  If it unwinds to caller, return
+/// ConstantTokenNone.  If it does not have a definitive unwind destination,
+/// return nullptr.
+///
+/// This routine gets invoked for calls in funclets in inlinees when inlining
+/// an invoke.  Since many funclets don't have calls inside them, it's queried
+/// on-demand rather than building a map of pads to unwind dests up front.
+/// Determining a funclet's unwind dest may require recursively searching its
+/// descendants, and also ancestors and cousins if the descendants don't provide
+/// an answer.  Since most funclets will have their unwind dest immediately
+/// available as the unwind dest of a catchswitch or cleanupret, this routine
+/// searches top-down from the given pad and then up. To avoid worst-case
+/// quadratic run-time given that approach, it uses a memo map to avoid
+/// re-processing funclet trees.  The callers that rewrite the IR as they go
+/// take advantage of this, for correctness, by checking/forcing rewritten
+/// pads' entries to match the original callee view.
+static Value *getUnwindDestToken(Instruction *EHPad,
+                                 UnwindDestMemoTy &MemoMap) {
+  // Catchpads unwind to the same place as their catchswitch;
+  // redirect any queries on catchpads so the code below can
+  // deal with just catchswitches and cleanuppads.
+  if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
+    EHPad = CPI->getCatchSwitch();
+
+  // Check if we've already determined the unwind dest for this pad.
+  auto Memo = MemoMap.find(EHPad);
+  if (Memo != MemoMap.end())
+    return Memo->second;
+
+  // Search EHPad and, if necessary, its descendants.
+  Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
+  assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
+  if (UnwindDestToken)
+    return UnwindDestToken;
+
+  // No information is available for this EHPad from itself or any of its
+  // descendants.  An unwind all the way out to a pad in the caller would
+  // need also to agree with the unwind dest of the parent funclet, so
+  // search up the chain to try to find a funclet with information.  Put
+  // null entries in the memo map to avoid re-processing as we go up.
+  MemoMap[EHPad] = nullptr;
+  Instruction *LastUselessPad = EHPad;
+  Value *AncestorToken;
+  for (AncestorToken = getParentPad(EHPad);
+       auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
+       AncestorToken = getParentPad(AncestorToken)) {
+    // Skip over catchpads since they just follow their catchswitches.
+    if (isa<CatchPadInst>(AncestorPad))
+      continue;
+    assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
+    auto AncestorMemo = MemoMap.find(AncestorPad);
+    if (AncestorMemo == MemoMap.end()) {
+      UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
+    } else {
+      UnwindDestToken = AncestorMemo->second;
+    }
+    if (UnwindDestToken)
+      break;
+    LastUselessPad = AncestorPad;
+  }
+
+  // Since the whole tree under LastUselessPad has no information, it all must
+  // match UnwindDestToken; record that to avoid repeating the search.
+  SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
+  while (!Worklist.empty()) {
+    Instruction *UselessPad = Worklist.pop_back_val();
+    assert(!MemoMap.count(UselessPad) || MemoMap[UselessPad] == nullptr);
+    MemoMap[UselessPad] = UnwindDestToken;
+    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
+      for (BasicBlock *HandlerBlock : CatchSwitch->handlers())
+        for (User *U : HandlerBlock->getFirstNonPHI()->users())
+          if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
+            Worklist.push_back(cast<Instruction>(U));
+    } else {
+      assert(isa<CleanupPadInst>(UselessPad));
+      for (User *U : UselessPad->users())
+        if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
+          Worklist.push_back(cast<Instruction>(U));
+    }
+  }
+
+  return UnwindDestToken;
+}
+
 /// When we inline a basic block into an invoke,
 /// we have to turn all of the calls that can throw into invokes.
 /// This function analyze BB to see if there are any calls, and if so,
 /// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
 /// nodes in that block with the values specified in InvokeDestPHIValues.
-static BasicBlock *
-HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB, BasicBlock *UnwindEdge) {
+static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***

