svn commit: r328362 - in vendor/llvm/dist-release_60: cmake/modules docs include/llvm/Analysis include/llvm/CodeGen include/llvm/MC include/llvm/Support include/llvm/Transforms/Vectorize lib/CodeGe...
Dimitry Andric
dim at FreeBSD.org
Wed Jan 24 20:23:51 UTC 2018
Author: dim
Date: Wed Jan 24 20:23:48 2018
New Revision: 328362
URL: https://svnweb.freebsd.org/changeset/base/328362
Log:
Vendor import of llvm release_60 branch r323338:
https://llvm.org/svn/llvm-project/llvm/branches/release_60@323338
Added:
vendor/llvm/dist-release_60/test/CodeGen/AArch64/GlobalISel/select-gv-cmodel-large.mir
vendor/llvm/dist-release_60/test/CodeGen/ARM/global-merge-dllexport.ll
vendor/llvm/dist-release_60/test/CodeGen/ARM/peephole-phi.mir
vendor/llvm/dist-release_60/test/CodeGen/PowerPC/PR35812-neg-cmpxchg.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/pr35761.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/pr35972.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/pr37563.ll
vendor/llvm/dist-release_60/test/ThinLTO/X86/Inputs/dicompositetype-unique2.ll
vendor/llvm/dist-release_60/test/ThinLTO/X86/dicompositetype-unique2.ll
vendor/llvm/dist-release_60/test/Transforms/CodeGenPrepare/X86/sink-addrmode-select.ll
vendor/llvm/dist-release_60/test/Transforms/JumpThreading/ddt-crash3.ll
vendor/llvm/dist-release_60/test/Transforms/JumpThreading/ddt-crash4.ll
vendor/llvm/dist-release_60/test/Transforms/LoopVectorize/pr35773.ll
vendor/llvm/dist-release_60/test/Transforms/SLPVectorizer/X86/PR35628_1.ll
vendor/llvm/dist-release_60/test/Transforms/SLPVectorizer/X86/PR35628_2.ll
vendor/llvm/dist-release_60/test/Transforms/SLPVectorizer/X86/PR35777.ll
vendor/llvm/dist-release_60/test/Transforms/SLPVectorizer/X86/PR35865.ll
vendor/llvm/dist-release_60/test/Transforms/StructurizeCFG/AMDGPU/
vendor/llvm/dist-release_60/test/Transforms/StructurizeCFG/AMDGPU/backedge-id-bug-xfail.ll
vendor/llvm/dist-release_60/test/Transforms/StructurizeCFG/AMDGPU/backedge-id-bug.ll
vendor/llvm/dist-release_60/test/Transforms/StructurizeCFG/AMDGPU/lit.local.cfg
vendor/llvm/dist-release_60/test/tools/llvm-readobj/macho-needed-libs.test
Modified:
vendor/llvm/dist-release_60/cmake/modules/LLVMConfig.cmake.in
vendor/llvm/dist-release_60/docs/ReleaseNotes.rst
vendor/llvm/dist-release_60/include/llvm/Analysis/RegionInfoImpl.h
vendor/llvm/dist-release_60/include/llvm/CodeGen/SelectionDAGAddressAnalysis.h
vendor/llvm/dist-release_60/include/llvm/MC/MCCodeView.h
vendor/llvm/dist-release_60/include/llvm/Support/GenericDomTreeConstruction.h
vendor/llvm/dist-release_60/include/llvm/Transforms/Vectorize/SLPVectorizer.h
vendor/llvm/dist-release_60/lib/CodeGen/CodeGenPrepare.cpp
vendor/llvm/dist-release_60/lib/CodeGen/GlobalMerge.cpp
vendor/llvm/dist-release_60/lib/CodeGen/PeepholeOptimizer.cpp
vendor/llvm/dist-release_60/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
vendor/llvm/dist-release_60/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
vendor/llvm/dist-release_60/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
vendor/llvm/dist-release_60/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp
vendor/llvm/dist-release_60/lib/CodeGen/TargetLoweringBase.cpp
vendor/llvm/dist-release_60/lib/Linker/IRMover.cpp
vendor/llvm/dist-release_60/lib/MC/MCCodeView.cpp
vendor/llvm/dist-release_60/lib/Target/AArch64/AArch64InstructionSelector.cpp
vendor/llvm/dist-release_60/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp
vendor/llvm/dist-release_60/lib/Target/PowerPC/PPCISelLowering.cpp
vendor/llvm/dist-release_60/lib/Target/PowerPC/PPCISelLowering.h
vendor/llvm/dist-release_60/lib/Target/PowerPC/PPCInstrInfo.td
vendor/llvm/dist-release_60/lib/Target/X86/AsmParser/X86AsmParser.cpp
vendor/llvm/dist-release_60/lib/Target/X86/X86ISelLowering.cpp
vendor/llvm/dist-release_60/lib/Target/X86/X86TargetTransformInfo.cpp
vendor/llvm/dist-release_60/lib/Transforms/Scalar/GVNHoist.cpp
vendor/llvm/dist-release_60/lib/Transforms/Scalar/StructurizeCFG.cpp
vendor/llvm/dist-release_60/lib/Transforms/Vectorize/LoopVectorize.cpp
vendor/llvm/dist-release_60/lib/Transforms/Vectorize/SLPVectorizer.cpp
vendor/llvm/dist-release_60/test/CodeGen/AArch64/atomic-ops-lse.ll
vendor/llvm/dist-release_60/test/CodeGen/AMDGPU/multilevel-break.ll
vendor/llvm/dist-release_60/test/CodeGen/AMDGPU/nested-loop-conditions.ll
vendor/llvm/dist-release_60/test/CodeGen/ARM/and-load-combine.ll
vendor/llvm/dist-release_60/test/CodeGen/ARM/atomic-cmpxchg.ll
vendor/llvm/dist-release_60/test/CodeGen/ARM/cmpxchg-O0.ll
vendor/llvm/dist-release_60/test/CodeGen/ARM/global-merge-external.ll
vendor/llvm/dist-release_60/test/CodeGen/PowerPC/atomics-regression.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/darwin-bzero.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/inline-asm-A-constraint.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/var-permute-128.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/var-permute-256.ll
vendor/llvm/dist-release_60/test/MC/COFF/cv-inline-linetable.s
vendor/llvm/dist-release_60/test/MC/X86/x86-64.s
vendor/llvm/dist-release_60/test/Transforms/GVNHoist/pr35222-hoist-load.ll
vendor/llvm/dist-release_60/test/Transforms/SLPVectorizer/X86/insert-element-build-vector.ll
vendor/llvm/dist-release_60/test/Transforms/SLPVectorizer/X86/insertvalue.ll
vendor/llvm/dist-release_60/test/Transforms/SLPVectorizer/X86/value-bug.ll
vendor/llvm/dist-release_60/test/Transforms/StructurizeCFG/nested-loop-order.ll
vendor/llvm/dist-release_60/tools/llvm-readobj/MachODumper.cpp
vendor/llvm/dist-release_60/unittests/IR/DominatorTreeBatchUpdatesTest.cpp
vendor/llvm/dist-release_60/unittests/IR/DominatorTreeTest.cpp
vendor/llvm/dist-release_60/utils/release/test-release.sh
Modified: vendor/llvm/dist-release_60/cmake/modules/LLVMConfig.cmake.in
==============================================================================
--- vendor/llvm/dist-release_60/cmake/modules/LLVMConfig.cmake.in Wed Jan 24 20:16:48 2018 (r328361)
+++ vendor/llvm/dist-release_60/cmake/modules/LLVMConfig.cmake.in Wed Jan 24 20:23:48 2018 (r328362)
@@ -37,6 +37,8 @@ set(LLVM_ENABLE_THREADS @LLVM_ENABLE_THREADS@)
set(LLVM_ENABLE_ZLIB @LLVM_ENABLE_ZLIB@)
+set(LLVM_LIBXML2_ENABLED @LLVM_LIBXML2_ENABLED@)
+
set(LLVM_ENABLE_DIA_SDK @LLVM_ENABLE_DIA_SDK@)
set(LLVM_NATIVE_ARCH @LLVM_NATIVE_ARCH@)
Modified: vendor/llvm/dist-release_60/docs/ReleaseNotes.rst
==============================================================================
--- vendor/llvm/dist-release_60/docs/ReleaseNotes.rst Wed Jan 24 20:16:48 2018 (r328361)
+++ vendor/llvm/dist-release_60/docs/ReleaseNotes.rst Wed Jan 24 20:23:48 2018 (r328362)
@@ -54,6 +54,8 @@ Non-comprehensive list of changes in this release
``DIVariables`` to the instructions in a ``Module``. The ``CheckDebugify``
pass determines how much of the metadata is lost.
+* Significantly improved quality of CodeView debug info for Windows.
+
* Note..
.. NOTE
@@ -69,12 +71,15 @@ Non-comprehensive list of changes in this release
Changes to the LLVM IR
----------------------
-Changes to the ARM Backend
---------------------------
+Changes to the ARM Target
+-------------------------
- During this release ...
+During this release the ARM target has:
+* Got support for enabling SjLj exception handling on platforms where it
+ isn't the default.
+
Changes to the MIPS Target
--------------------------
@@ -89,8 +94,11 @@ Changes to the PowerPC Target
Changes to the X86 Target
-------------------------
- During this release ...
+During this release ...
+* Got support for enabling SjLj exception handling on platforms where it
+ isn't the default.
+
Changes to the AMDGPU Target
-----------------------------
@@ -116,8 +124,46 @@ Changes to the C API
External Open Source Projects Using LLVM 6
==========================================
-* A project...
+JFS - JIT Fuzzing Solver
+------------------------
+`JFS <https://github.com/delcypher/jfs>`_ is an experimental constraint solver
+designed to investigate using coverage guided fuzzing as an incomplete strategy
+for solving boolean, BitVector, and floating-point constraints.
+It is built on top of LLVM, Clang, LibFuzzer, and Z3.
+
+The solver works by generating a C++ program where the reachability of an
+`abort()` statement is equivalent to finding a satisfying assignment to the
+constraints. This program is then compiled by Clang with `SanitizerCoverage
+<https://releases.llvm.org/6.0.0/tools/clang/docs/SanitizerCoverage.html>`_
+instrumentation and then fuzzed using :doc:`LibFuzzer <LibFuzzer>`.
+
+Zig Programming Language
+------------------------
+
+`Zig <http://ziglang.org>`_ is an open-source programming language designed
+for robustness, optimality, and clarity. It is intended to replace C. It
+provides high level features such as Generics,
+Compile Time Function Execution, and Partial Evaluation, yet exposes low level
+LLVM IR features such as Aliases. Zig uses Clang to provide automatic
+import of .h symbols - even inline functions and macros. Zig uses LLD combined
+with lazily building compiler-rt to provide out-of-the-box cross-compiling for
+all supported targets.
+
+LDC - the LLVM-based D compiler
+-------------------------------
+
+`D <http://dlang.org>`_ is a language with C-like syntax and static typing. It
+pragmatically combines efficiency, control, and modeling power, with safety and
+programmer productivity. D supports powerful concepts like Compile-Time Function
+Execution (CTFE) and Template Meta-Programming, provides an innovative approach
+to concurrency and offers many classical paradigms.
+
+`LDC <http://wiki.dlang.org/LDC>`_ uses the frontend from the reference compiler
+combined with LLVM as backend to produce efficient native code. LDC targets
+x86/x86_64 systems like Linux, OS X, FreeBSD and Windows and also Linux on ARM
+and PowerPC (32/64 bit). Ports to other architectures like AArch64 and MIPS64
+are underway.
Additional Information
======================
Modified: vendor/llvm/dist-release_60/include/llvm/Analysis/RegionInfoImpl.h
==============================================================================
--- vendor/llvm/dist-release_60/include/llvm/Analysis/RegionInfoImpl.h Wed Jan 24 20:16:48 2018 (r328361)
+++ vendor/llvm/dist-release_60/include/llvm/Analysis/RegionInfoImpl.h Wed Jan 24 20:23:48 2018 (r328362)
@@ -254,23 +254,23 @@ std::string RegionBase<Tr>::getNameStr() const {
template <class Tr>
void RegionBase<Tr>::verifyBBInRegion(BlockT *BB) const {
if (!contains(BB))
- llvm_unreachable("Broken region found: enumerated BB not in region!");
+ report_fatal_error("Broken region found: enumerated BB not in region!");
BlockT *entry = getEntry(), *exit = getExit();
for (BlockT *Succ :
make_range(BlockTraits::child_begin(BB), BlockTraits::child_end(BB))) {
if (!contains(Succ) && exit != Succ)
- llvm_unreachable("Broken region found: edges leaving the region must go "
- "to the exit node!");
+ report_fatal_error("Broken region found: edges leaving the region must go "
+ "to the exit node!");
}
if (entry != BB) {
for (BlockT *Pred : make_range(InvBlockTraits::child_begin(BB),
InvBlockTraits::child_end(BB))) {
if (!contains(Pred))
- llvm_unreachable("Broken region found: edges entering the region must "
- "go to the entry node!");
+ report_fatal_error("Broken region found: edges entering the region must "
+ "go to the entry node!");
}
}
}
@@ -557,7 +557,7 @@ void RegionInfoBase<Tr>::verifyBBMap(const RegionT *R)
} else {
BlockT *BB = Element->template getNodeAs<BlockT>();
if (getRegionFor(BB) != R)
- llvm_unreachable("BB map does not match region nesting");
+ report_fatal_error("BB map does not match region nesting");
}
}
}
Modified: vendor/llvm/dist-release_60/include/llvm/CodeGen/SelectionDAGAddressAnalysis.h
==============================================================================
--- vendor/llvm/dist-release_60/include/llvm/CodeGen/SelectionDAGAddressAnalysis.h Wed Jan 24 20:16:48 2018 (r328361)
+++ vendor/llvm/dist-release_60/include/llvm/CodeGen/SelectionDAGAddressAnalysis.h Wed Jan 24 20:23:48 2018 (r328362)
@@ -56,7 +56,7 @@ class BaseIndexOffset { (public)
int64_t &Off);
/// Parses tree in Ptr for base, index, offset addresses.
- static BaseIndexOffset match(SDValue Ptr, const SelectionDAG &DAG);
+ static BaseIndexOffset match(LSBaseSDNode *N, const SelectionDAG &DAG);
};
} // end namespace llvm
Modified: vendor/llvm/dist-release_60/include/llvm/MC/MCCodeView.h
==============================================================================
--- vendor/llvm/dist-release_60/include/llvm/MC/MCCodeView.h Wed Jan 24 20:16:48 2018 (r328361)
+++ vendor/llvm/dist-release_60/include/llvm/MC/MCCodeView.h Wed Jan 24 20:23:48 2018 (r328362)
@@ -177,13 +177,7 @@ class CodeViewContext { (public)
unsigned IACol);
/// Retreive the function info if this is a valid function id, or nullptr.
- MCCVFunctionInfo *getCVFunctionInfo(unsigned FuncId) {
- if (FuncId >= Functions.size())
- return nullptr;
- if (Functions[FuncId].isUnallocatedFunctionInfo())
- return nullptr;
- return &Functions[FuncId];
- }
+ MCCVFunctionInfo *getCVFunctionInfo(unsigned FuncId);
/// Saves the information from the currently parsed .cv_loc directive
/// and sets CVLocSeen. When the next instruction is assembled an entry
@@ -199,50 +193,22 @@ class CodeViewContext { (public)
CurrentCVLoc.setIsStmt(IsStmt);
CVLocSeen = true;
}
- void clearCVLocSeen() { CVLocSeen = false; }
bool getCVLocSeen() { return CVLocSeen; }
+ void clearCVLocSeen() { CVLocSeen = false; }
+
const MCCVLoc &getCurrentCVLoc() { return CurrentCVLoc; }
bool isValidCVFileNumber(unsigned FileNumber);
/// \brief Add a line entry.
- void addLineEntry(const MCCVLineEntry &LineEntry) {
- size_t Offset = MCCVLines.size();
- auto I = MCCVLineStartStop.insert(
- {LineEntry.getFunctionId(), {Offset, Offset + 1}});
- if (!I.second)
- I.first->second.second = Offset + 1;
- MCCVLines.push_back(LineEntry);
- }
+ void addLineEntry(const MCCVLineEntry &LineEntry);
- std::vector<MCCVLineEntry> getFunctionLineEntries(unsigned FuncId) {
- std::vector<MCCVLineEntry> FilteredLines;
+ std::vector<MCCVLineEntry> getFunctionLineEntries(unsigned FuncId);
- auto I = MCCVLineStartStop.find(FuncId);
- if (I != MCCVLineStartStop.end())
- for (size_t Idx = I->second.first, End = I->second.second; Idx != End;
- ++Idx)
- if (MCCVLines[Idx].getFunctionId() == FuncId)
- FilteredLines.push_back(MCCVLines[Idx]);
- return FilteredLines;
- }
+ std::pair<size_t, size_t> getLineExtent(unsigned FuncId);
- std::pair<size_t, size_t> getLineExtent(unsigned FuncId) {
- auto I = MCCVLineStartStop.find(FuncId);
- // Return an empty extent if there are no cv_locs for this function id.
- if (I == MCCVLineStartStop.end())
- return {~0ULL, 0};
- return I->second;
- }
-
- ArrayRef<MCCVLineEntry> getLinesForExtent(size_t L, size_t R) {
- if (R <= L)
- return None;
- if (L >= MCCVLines.size())
- return None;
- return makeArrayRef(&MCCVLines[L], R - L);
- }
+ ArrayRef<MCCVLineEntry> getLinesForExtent(size_t L, size_t R);
/// Emits a line table substream.
void emitLineTableForFunction(MCObjectStreamer &OS, unsigned FuncId,
Modified: vendor/llvm/dist-release_60/include/llvm/Support/GenericDomTreeConstruction.h
==============================================================================
--- vendor/llvm/dist-release_60/include/llvm/Support/GenericDomTreeConstruction.h Wed Jan 24 20:16:48 2018 (r328361)
+++ vendor/llvm/dist-release_60/include/llvm/Support/GenericDomTreeConstruction.h Wed Jan 24 20:23:48 2018 (r328362)
@@ -628,7 +628,7 @@ struct SemiNCAInfo {
DecreasingLevel>
Bucket; // Queue of tree nodes sorted by level in descending order.
SmallDenseSet<TreeNodePtr, 8> Affected;
- SmallDenseSet<TreeNodePtr, 8> Visited;
+ SmallDenseMap<TreeNodePtr, unsigned, 8> Visited;
SmallVector<TreeNodePtr, 8> AffectedQueue;
SmallVector<TreeNodePtr, 8> VisitedNotAffectedQueue;
};
@@ -706,7 +706,7 @@ struct SemiNCAInfo {
// algorithm does not really know or use the set of roots and can make a
// different (implicit) decision about which nodes within an infinite loop
// becomes a root.
- if (DT.isVirtualRoot(TN->getIDom())) {
+ if (TN && !DT.isVirtualRoot(TN->getIDom())) {
DEBUG(dbgs() << "Root " << BlockNamePrinter(R)
<< " is not virtual root's child\n"
<< "The entire tree needs to be rebuilt\n");
@@ -753,14 +753,16 @@ struct SemiNCAInfo {
while (!II.Bucket.empty()) {
const TreeNodePtr CurrentNode = II.Bucket.top().second;
+ const unsigned CurrentLevel = CurrentNode->getLevel();
II.Bucket.pop();
DEBUG(dbgs() << "\tAdding to Visited and AffectedQueue: "
<< BlockNamePrinter(CurrentNode) << "\n");
- II.Visited.insert(CurrentNode);
+
+ II.Visited.insert({CurrentNode, CurrentLevel});
II.AffectedQueue.push_back(CurrentNode);
// Discover and collect affected successors of the current node.
- VisitInsertion(DT, BUI, CurrentNode, CurrentNode->getLevel(), NCD, II);
+ VisitInsertion(DT, BUI, CurrentNode, CurrentLevel, NCD, II);
}
// Finish by updating immediate dominators and levels.
@@ -772,13 +774,17 @@ struct SemiNCAInfo {
const TreeNodePtr TN, const unsigned RootLevel,
const TreeNodePtr NCD, InsertionInfo &II) {
const unsigned NCDLevel = NCD->getLevel();
- DEBUG(dbgs() << "Visiting " << BlockNamePrinter(TN) << "\n");
+ DEBUG(dbgs() << "Visiting " << BlockNamePrinter(TN) << ", RootLevel "
+ << RootLevel << "\n");
SmallVector<TreeNodePtr, 8> Stack = {TN};
assert(TN->getBlock() && II.Visited.count(TN) && "Preconditions!");
+ SmallPtrSet<TreeNodePtr, 8> Processed;
+
do {
TreeNodePtr Next = Stack.pop_back_val();
+ DEBUG(dbgs() << " Next: " << BlockNamePrinter(Next) << "\n");
for (const NodePtr Succ :
ChildrenGetter<IsPostDom>::Get(Next->getBlock(), BUI)) {
@@ -786,19 +792,31 @@ struct SemiNCAInfo {
assert(SuccTN && "Unreachable successor found at reachable insertion");
const unsigned SuccLevel = SuccTN->getLevel();
- DEBUG(dbgs() << "\tSuccessor " << BlockNamePrinter(Succ)
- << ", level = " << SuccLevel << "\n");
+ DEBUG(dbgs() << "\tSuccessor " << BlockNamePrinter(Succ) << ", level = "
+ << SuccLevel << "\n");
+ // Do not process the same node multiple times.
+ if (Processed.count(Next) > 0)
+ continue;
+
// Succ dominated by subtree From -- not affected.
// (Based on the lemma 2.5 from the second paper.)
if (SuccLevel > RootLevel) {
DEBUG(dbgs() << "\t\tDominated by subtree From\n");
- if (II.Visited.count(SuccTN) != 0)
- continue;
+ if (II.Visited.count(SuccTN) != 0) {
+ DEBUG(dbgs() << "\t\t\talready visited at level "
+ << II.Visited[SuccTN] << "\n\t\t\tcurrent level "
+ << RootLevel << ")\n");
+ // A node can be necessary to visit again if we see it again at
+ // a lower level than before.
+ if (II.Visited[SuccTN] >= RootLevel)
+ continue;
+ }
+
DEBUG(dbgs() << "\t\tMarking visited not affected "
<< BlockNamePrinter(Succ) << "\n");
- II.Visited.insert(SuccTN);
+ II.Visited.insert({SuccTN, RootLevel});
II.VisitedNotAffectedQueue.push_back(SuccTN);
Stack.push_back(SuccTN);
} else if ((SuccLevel > NCDLevel + 1) &&
@@ -809,6 +827,8 @@ struct SemiNCAInfo {
II.Bucket.push({SuccLevel, SuccTN});
}
}
+
+ Processed.insert(Next);
} while (!Stack.empty());
}
@@ -920,21 +940,21 @@ struct SemiNCAInfo {
const NodePtr NCDBlock = DT.findNearestCommonDominator(From, To);
const TreeNodePtr NCD = DT.getNode(NCDBlock);
- // To dominates From -- nothing to do.
- if (ToTN == NCD) return;
+ // If To dominates From -- nothing to do.
+ if (ToTN != NCD) {
+ DT.DFSInfoValid = false;
- DT.DFSInfoValid = false;
+ const TreeNodePtr ToIDom = ToTN->getIDom();
+ DEBUG(dbgs() << "\tNCD " << BlockNamePrinter(NCD) << ", ToIDom "
+ << BlockNamePrinter(ToIDom) << "\n");
- const TreeNodePtr ToIDom = ToTN->getIDom();
- DEBUG(dbgs() << "\tNCD " << BlockNamePrinter(NCD) << ", ToIDom "
- << BlockNamePrinter(ToIDom) << "\n");
-
- // To remains reachable after deletion.
- // (Based on the caption under Figure 4. from the second paper.)
- if (FromTN != ToIDom || HasProperSupport(DT, BUI, ToTN))
- DeleteReachable(DT, BUI, FromTN, ToTN);
- else
- DeleteUnreachable(DT, BUI, ToTN);
+ // To remains reachable after deletion.
+ // (Based on the caption under Figure 4. from the second paper.)
+ if (FromTN != ToIDom || HasProperSupport(DT, BUI, ToTN))
+ DeleteReachable(DT, BUI, FromTN, ToTN);
+ else
+ DeleteUnreachable(DT, BUI, ToTN);
+ }
if (IsPostDom) UpdateRootsAfterUpdate(DT, BUI);
}
Modified: vendor/llvm/dist-release_60/include/llvm/Transforms/Vectorize/SLPVectorizer.h
==============================================================================
--- vendor/llvm/dist-release_60/include/llvm/Transforms/Vectorize/SLPVectorizer.h Wed Jan 24 20:16:48 2018 (r328361)
+++ vendor/llvm/dist-release_60/include/llvm/Transforms/Vectorize/SLPVectorizer.h Wed Jan 24 20:23:48 2018 (r328362)
@@ -95,14 +95,9 @@ struct SLPVectorizerPass : public PassInfoMixin<SLPVec
bool tryToVectorizePair(Value *A, Value *B, slpvectorizer::BoUpSLP &R);
/// \brief Try to vectorize a list of operands.
- /// \@param BuildVector A list of users to ignore for the purpose of
- /// scheduling and cost estimation when NeedExtraction
- /// is false.
/// \returns true if a value was vectorized.
bool tryToVectorizeList(ArrayRef<Value *> VL, slpvectorizer::BoUpSLP &R,
- ArrayRef<Value *> BuildVector = None,
- bool AllowReorder = false,
- bool NeedExtraction = false);
+ bool AllowReorder = false);
/// \brief Try to vectorize a chain that may start at the operands of \p I.
bool tryToVectorize(Instruction *I, slpvectorizer::BoUpSLP &R);
Modified: vendor/llvm/dist-release_60/lib/CodeGen/CodeGenPrepare.cpp
==============================================================================
--- vendor/llvm/dist-release_60/lib/CodeGen/CodeGenPrepare.cpp Wed Jan 24 20:16:48 2018 (r328361)
+++ vendor/llvm/dist-release_60/lib/CodeGen/CodeGenPrepare.cpp Wed Jan 24 20:23:48 2018 (r328362)
@@ -2700,8 +2700,13 @@ class AddressingModeCombiner { (public)
// we still need to collect it due to original value is different.
// And later we will need all original values as anchors during
// finding the common Phi node.
+ // We also must reject the case when base offset is different and
+ // scale reg is not null, we cannot handle this case due to merge of
+ // different offsets will be used as ScaleReg.
if (DifferentField != ExtAddrMode::MultipleFields &&
- DifferentField != ExtAddrMode::ScaleField) {
+ DifferentField != ExtAddrMode::ScaleField &&
+ (DifferentField != ExtAddrMode::BaseOffsField ||
+ !NewAddrMode.ScaledReg)) {
AddrModes.emplace_back(NewAddrMode);
return true;
}
Modified: vendor/llvm/dist-release_60/lib/CodeGen/GlobalMerge.cpp
==============================================================================
--- vendor/llvm/dist-release_60/lib/CodeGen/GlobalMerge.cpp Wed Jan 24 20:16:48 2018 (r328361)
+++ vendor/llvm/dist-release_60/lib/CodeGen/GlobalMerge.cpp Wed Jan 24 20:23:48 2018 (r328362)
@@ -577,7 +577,8 @@ bool GlobalMerge::doInitialization(Module &M) {
for (auto &GV : M.globals()) {
// Merge is safe for "normal" internal or external globals only
if (GV.isDeclaration() || GV.isThreadLocal() ||
- GV.hasSection() || GV.hasImplicitSection())
+ GV.hasSection() || GV.hasImplicitSection() ||
+ GV.hasDLLExportStorageClass())
continue;
// It's not safe to merge globals that may be preempted
Modified: vendor/llvm/dist-release_60/lib/CodeGen/PeepholeOptimizer.cpp
==============================================================================
--- vendor/llvm/dist-release_60/lib/CodeGen/PeepholeOptimizer.cpp Wed Jan 24 20:16:48 2018 (r328361)
+++ vendor/llvm/dist-release_60/lib/CodeGen/PeepholeOptimizer.cpp Wed Jan 24 20:23:48 2018 (r328362)
@@ -719,15 +719,14 @@ bool PeepholeOptimizer::findNextSource(unsigned Reg, u
CurSrcPair = Pair;
ValueTracker ValTracker(CurSrcPair.Reg, CurSrcPair.SubReg, *MRI,
!DisableAdvCopyOpt, TII);
- ValueTrackerResult Res;
- bool ShouldRewrite = false;
- do {
- // Follow the chain of copies until we reach the top of the use-def chain
- // or find a more suitable source.
- Res = ValTracker.getNextSource();
+ // Follow the chain of copies until we find a more suitable source, a phi
+ // or have to abort.
+ while (true) {
+ ValueTrackerResult Res = ValTracker.getNextSource();
+ // Abort at the end of a chain (without finding a suitable source).
if (!Res.isValid())
- break;
+ return false;
// Insert the Def -> Use entry for the recently found source.
ValueTrackerResult CurSrcRes = RewriteMap.lookup(CurSrcPair);
@@ -763,26 +762,21 @@ bool PeepholeOptimizer::findNextSource(unsigned Reg, u
if (TargetRegisterInfo::isPhysicalRegister(CurSrcPair.Reg))
return false;
+ // Keep following the chain if the value isn't any better yet.
const TargetRegisterClass *SrcRC = MRI->getRegClass(CurSrcPair.Reg);
- ShouldRewrite = TRI->shouldRewriteCopySrc(DefRC, SubReg, SrcRC,
- CurSrcPair.SubReg);
- } while (!ShouldRewrite);
+ if (!TRI->shouldRewriteCopySrc(DefRC, SubReg, SrcRC, CurSrcPair.SubReg))
+ continue;
- // Continue looking for new sources...
- if (Res.isValid())
- continue;
+ // We currently cannot deal with subreg operands on PHI instructions
+ // (see insertPHI()).
+ if (PHICount > 0 && CurSrcPair.SubReg != 0)
+ continue;
- // Do not continue searching for a new source if the there's at least
- // one use-def which cannot be rewritten.
- if (!ShouldRewrite)
- return false;
+ // We found a suitable source, and are done with this chain.
+ break;
+ }
}
- if (PHICount >= RewritePHILimit) {
- DEBUG(dbgs() << "findNextSource: PHI limit reached\n");
- return false;
- }
-
// If we did not find a more suitable source, there is nothing to optimize.
return CurSrcPair.Reg != Reg;
}
@@ -799,6 +793,9 @@ insertPHI(MachineRegisterInfo *MRI, const TargetInstrI
assert(!SrcRegs.empty() && "No sources to create a PHI instruction?");
const TargetRegisterClass *NewRC = MRI->getRegClass(SrcRegs[0].Reg);
+ // NewRC is only correct if no subregisters are involved. findNextSource()
+ // should have rejected those cases already.
+ assert(SrcRegs[0].SubReg == 0 && "should not have subreg operand");
unsigned NewVR = MRI->createVirtualRegister(NewRC);
MachineBasicBlock *MBB = OrigPHI->getParent();
MachineInstrBuilder MIB = BuildMI(*MBB, OrigPHI, OrigPHI->getDebugLoc(),
Modified: vendor/llvm/dist-release_60/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
==============================================================================
--- vendor/llvm/dist-release_60/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Wed Jan 24 20:16:48 2018 (r328361)
+++ vendor/llvm/dist-release_60/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Wed Jan 24 20:23:48 2018 (r328362)
@@ -3842,9 +3842,16 @@ bool DAGCombiner::SearchForAndLoads(SDNode *N,
EVT ExtVT;
if (isAndLoadExtLoad(Mask, Load, Load->getValueType(0), ExtVT) &&
isLegalNarrowLoad(Load, ISD::ZEXTLOAD, ExtVT)) {
- // Only add this load if we can make it more narrow.
- if (ExtVT.bitsLT(Load->getMemoryVT()))
+
+ // ZEXTLOAD is already small enough.
+ if (Load->getExtensionType() == ISD::ZEXTLOAD &&
+ ExtVT.bitsGE(Load->getMemoryVT()))
+ continue;
+
+ // Use LE to convert equal sized loads to zext.
+ if (ExtVT.bitsLE(Load->getMemoryVT()))
Loads.insert(Load);
+
continue;
}
return false;
@@ -3899,11 +3906,13 @@ bool DAGCombiner::BackwardsPropagateMask(SDNode *N, Se
if (Loads.size() == 0)
return false;
+ DEBUG(dbgs() << "Backwards propagate AND: "; N->dump());
SDValue MaskOp = N->getOperand(1);
// If it exists, fixup the single node we allow in the tree that needs
// masking.
if (FixupNode) {
+ DEBUG(dbgs() << "First, need to fix up: "; FixupNode->dump());
SDValue And = DAG.getNode(ISD::AND, SDLoc(FixupNode),
FixupNode->getValueType(0),
SDValue(FixupNode, 0), MaskOp);
@@ -3914,14 +3923,21 @@ bool DAGCombiner::BackwardsPropagateMask(SDNode *N, Se
// Narrow any constants that need it.
for (auto *LogicN : NodesWithConsts) {
- auto *C = cast<ConstantSDNode>(LogicN->getOperand(1));
- SDValue And = DAG.getNode(ISD::AND, SDLoc(C), C->getValueType(0),
- SDValue(C, 0), MaskOp);
- DAG.UpdateNodeOperands(LogicN, LogicN->getOperand(0), And);
+ SDValue Op0 = LogicN->getOperand(0);
+ SDValue Op1 = LogicN->getOperand(1);
+
+ if (isa<ConstantSDNode>(Op0))
+ std::swap(Op0, Op1);
+
+ SDValue And = DAG.getNode(ISD::AND, SDLoc(Op1), Op1.getValueType(),
+ Op1, MaskOp);
+
+ DAG.UpdateNodeOperands(LogicN, Op0, And);
}
// Create narrow loads.
for (auto *Load : Loads) {
+ DEBUG(dbgs() << "Propagate AND back to: "; Load->dump());
SDValue And = DAG.getNode(ISD::AND, SDLoc(Load), Load->getValueType(0),
SDValue(Load, 0), MaskOp);
DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 0), And);
@@ -5209,7 +5225,7 @@ SDValue DAGCombiner::MatchLoadCombine(SDNode *N) {
return SDValue();
// Loads must share the same base address
- BaseIndexOffset Ptr = BaseIndexOffset::match(L->getBasePtr(), DAG);
+ BaseIndexOffset Ptr = BaseIndexOffset::match(L, DAG);
int64_t ByteOffsetFromBase = 0;
if (!Base)
Base = Ptr;
@@ -12928,7 +12944,7 @@ void DAGCombiner::getStoreMergeCandidates(
StoreSDNode *St, SmallVectorImpl<MemOpLink> &StoreNodes) {
// This holds the base pointer, index, and the offset in bytes from the base
// pointer.
- BaseIndexOffset BasePtr = BaseIndexOffset::match(St->getBasePtr(), DAG);
+ BaseIndexOffset BasePtr = BaseIndexOffset::match(St, DAG);
EVT MemVT = St->getMemoryVT();
SDValue Val = peekThroughBitcast(St->getValue());
@@ -12949,7 +12965,7 @@ void DAGCombiner::getStoreMergeCandidates(
EVT LoadVT;
if (IsLoadSrc) {
auto *Ld = cast<LoadSDNode>(Val);
- LBasePtr = BaseIndexOffset::match(Ld->getBasePtr(), DAG);
+ LBasePtr = BaseIndexOffset::match(Ld, DAG);
LoadVT = Ld->getMemoryVT();
// Load and store should be the same type.
if (MemVT != LoadVT)
@@ -12968,7 +12984,7 @@ void DAGCombiner::getStoreMergeCandidates(
return false;
// The Load's Base Ptr must also match
if (LoadSDNode *OtherLd = dyn_cast<LoadSDNode>(Val)) {
- auto LPtr = BaseIndexOffset::match(OtherLd->getBasePtr(), DAG);
+ auto LPtr = BaseIndexOffset::match(OtherLd, DAG);
if (LoadVT != OtherLd->getMemoryVT())
return false;
if (!(LBasePtr.equalBaseIndex(LPtr, DAG)))
@@ -12992,7 +13008,7 @@ void DAGCombiner::getStoreMergeCandidates(
Val.getOpcode() != ISD::EXTRACT_SUBVECTOR)
return false;
}
- Ptr = BaseIndexOffset::match(Other->getBasePtr(), DAG);
+ Ptr = BaseIndexOffset::match(Other, DAG);
return (BasePtr.equalBaseIndex(Ptr, DAG, Offset));
};
@@ -13365,7 +13381,7 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *
if (Ld->getMemoryVT() != MemVT)
break;
- BaseIndexOffset LdPtr = BaseIndexOffset::match(Ld->getBasePtr(), DAG);
+ BaseIndexOffset LdPtr = BaseIndexOffset::match(Ld, DAG);
// If this is not the first ptr that we check.
int64_t LdOffset = 0;
if (LdBasePtr.getBase().getNode()) {
@@ -17432,44 +17448,46 @@ bool DAGCombiner::isAlias(LSBaseSDNode *Op0, LSBaseSDN
unsigned NumBytes1 = Op1->getMemoryVT().getStoreSize();
// Check for BaseIndexOffset matching.
- BaseIndexOffset BasePtr0 = BaseIndexOffset::match(Op0->getBasePtr(), DAG);
- BaseIndexOffset BasePtr1 = BaseIndexOffset::match(Op1->getBasePtr(), DAG);
+ BaseIndexOffset BasePtr0 = BaseIndexOffset::match(Op0, DAG);
+ BaseIndexOffset BasePtr1 = BaseIndexOffset::match(Op1, DAG);
int64_t PtrDiff;
- if (BasePtr0.equalBaseIndex(BasePtr1, DAG, PtrDiff))
- return !((NumBytes0 <= PtrDiff) || (PtrDiff + NumBytes1 <= 0));
+ if (BasePtr0.getBase().getNode() && BasePtr1.getBase().getNode()) {
+ if (BasePtr0.equalBaseIndex(BasePtr1, DAG, PtrDiff))
+ return !((NumBytes0 <= PtrDiff) || (PtrDiff + NumBytes1 <= 0));
- // If both BasePtr0 and BasePtr1 are FrameIndexes, we will not be
- // able to calculate their relative offset if at least one arises
- // from an alloca. However, these allocas cannot overlap and we
- // can infer there is no alias.
- if (auto *A = dyn_cast<FrameIndexSDNode>(BasePtr0.getBase()))
- if (auto *B = dyn_cast<FrameIndexSDNode>(BasePtr1.getBase())) {
- MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
- // If the base are the same frame index but the we couldn't find a
- // constant offset, (indices are different) be conservative.
- if (A != B && (!MFI.isFixedObjectIndex(A->getIndex()) ||
- !MFI.isFixedObjectIndex(B->getIndex())))
- return false;
- }
+ // If both BasePtr0 and BasePtr1 are FrameIndexes, we will not be
+ // able to calculate their relative offset if at least one arises
+ // from an alloca. However, these allocas cannot overlap and we
+ // can infer there is no alias.
+ if (auto *A = dyn_cast<FrameIndexSDNode>(BasePtr0.getBase()))
+ if (auto *B = dyn_cast<FrameIndexSDNode>(BasePtr1.getBase())) {
+ MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
+ // If the base are the same frame index but the we couldn't find a
+ // constant offset, (indices are different) be conservative.
+ if (A != B && (!MFI.isFixedObjectIndex(A->getIndex()) ||
+ !MFI.isFixedObjectIndex(B->getIndex())))
+ return false;
+ }
- bool IsFI0 = isa<FrameIndexSDNode>(BasePtr0.getBase());
- bool IsFI1 = isa<FrameIndexSDNode>(BasePtr1.getBase());
- bool IsGV0 = isa<GlobalAddressSDNode>(BasePtr0.getBase());
- bool IsGV1 = isa<GlobalAddressSDNode>(BasePtr1.getBase());
- bool IsCV0 = isa<ConstantPoolSDNode>(BasePtr0.getBase());
- bool IsCV1 = isa<ConstantPoolSDNode>(BasePtr1.getBase());
+ bool IsFI0 = isa<FrameIndexSDNode>(BasePtr0.getBase());
+ bool IsFI1 = isa<FrameIndexSDNode>(BasePtr1.getBase());
+ bool IsGV0 = isa<GlobalAddressSDNode>(BasePtr0.getBase());
+ bool IsGV1 = isa<GlobalAddressSDNode>(BasePtr1.getBase());
+ bool IsCV0 = isa<ConstantPoolSDNode>(BasePtr0.getBase());
+ bool IsCV1 = isa<ConstantPoolSDNode>(BasePtr1.getBase());
- // If of mismatched base types or checkable indices we can check
- // they do not alias.
- if ((BasePtr0.getIndex() == BasePtr1.getIndex() || (IsFI0 != IsFI1) ||
- (IsGV0 != IsGV1) || (IsCV0 != IsCV1)) &&
- (IsFI0 || IsGV0 || IsCV0) && (IsFI1 || IsGV1 || IsCV1))
- return false;
+ // If of mismatched base types or checkable indices we can check
+ // they do not alias.
+ if ((BasePtr0.getIndex() == BasePtr1.getIndex() || (IsFI0 != IsFI1) ||
+ (IsGV0 != IsGV1) || (IsCV0 != IsCV1)) &&
+ (IsFI0 || IsGV0 || IsCV0) && (IsFI1 || IsGV1 || IsCV1))
+ return false;
+ }
- // If we know required SrcValue1 and SrcValue2 have relatively large alignment
- // compared to the size and offset of the access, we may be able to prove they
- // do not alias. This check is conservative for now to catch cases created by
- // splitting vector types.
+ // If we know required SrcValue1 and SrcValue2 have relatively large
+ // alignment compared to the size and offset of the access, we may be able
+ // to prove they do not alias. This check is conservative for now to catch
+ // cases created by splitting vector types.
int64_t SrcValOffset0 = Op0->getSrcValueOffset();
int64_t SrcValOffset1 = Op1->getSrcValueOffset();
unsigned OrigAlignment0 = Op0->getOriginalAlignment();
@@ -17479,8 +17497,8 @@ bool DAGCombiner::isAlias(LSBaseSDNode *Op0, LSBaseSDN
int64_t OffAlign0 = SrcValOffset0 % OrigAlignment0;
int64_t OffAlign1 = SrcValOffset1 % OrigAlignment1;
- // There is no overlap between these relatively aligned accesses of similar
- // size. Return no alias.
+ // There is no overlap between these relatively aligned accesses of
+ // similar size. Return no alias.
if ((OffAlign0 + NumBytes0) <= OffAlign1 ||
(OffAlign1 + NumBytes1) <= OffAlign0)
return false;
@@ -17643,7 +17661,7 @@ bool DAGCombiner::findBetterNeighborChains(StoreSDNode
// This holds the base pointer, index, and the offset in bytes from the base
// pointer.
- BaseIndexOffset BasePtr = BaseIndexOffset::match(St->getBasePtr(), DAG);
+ BaseIndexOffset BasePtr = BaseIndexOffset::match(St, DAG);
// We must have a base and an offset.
if (!BasePtr.getBase().getNode())
@@ -17669,7 +17687,7 @@ bool DAGCombiner::findBetterNeighborChains(StoreSDNode
break;
// Find the base pointer and offset for this memory node.
- BaseIndexOffset Ptr = BaseIndexOffset::match(Index->getBasePtr(), DAG);
+ BaseIndexOffset Ptr = BaseIndexOffset::match(Index, DAG);
// Check that the base pointer is the same as the original one.
if (!BasePtr.equalBaseIndex(Ptr, DAG))
Modified: vendor/llvm/dist-release_60/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
==============================================================================
--- vendor/llvm/dist-release_60/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp Wed Jan 24 20:16:48 2018 (r328361)
+++ vendor/llvm/dist-release_60/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp Wed Jan 24 20:23:48 2018 (r328362)
@@ -2965,12 +2965,12 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
case ISD::ZERO_EXTEND:
LHS = DAG.getNode(ISD::AssertZext, dl, OuterType, Res,
DAG.getValueType(AtomicType));
- RHS = DAG.getNode(ISD::ZERO_EXTEND, dl, OuterType, Node->getOperand(2));
+ RHS = DAG.getZeroExtendInReg(Node->getOperand(2), dl, AtomicType);
ExtRes = LHS;
break;
case ISD::ANY_EXTEND:
LHS = DAG.getZeroExtendInReg(Res, dl, AtomicType);
- RHS = DAG.getNode(ISD::ZERO_EXTEND, dl, OuterType, Node->getOperand(2));
+ RHS = DAG.getZeroExtendInReg(Node->getOperand(2), dl, AtomicType);
break;
default:
llvm_unreachable("Invalid atomic op extension");
Modified: vendor/llvm/dist-release_60/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
==============================================================================
--- vendor/llvm/dist-release_60/lib/CodeGen/SelectionDAG/SelectionDAG.cpp Wed Jan 24 20:16:48 2018 (r328361)
+++ vendor/llvm/dist-release_60/lib/CodeGen/SelectionDAG/SelectionDAG.cpp Wed Jan 24 20:23:48 2018 (r328362)
@@ -7947,11 +7947,8 @@ bool SelectionDAG::areNonVolatileConsecutiveLoads(Load
if (VT.getSizeInBits() / 8 != Bytes)
return false;
- SDValue Loc = LD->getOperand(1);
- SDValue BaseLoc = Base->getOperand(1);
-
- auto BaseLocDecomp = BaseIndexOffset::match(BaseLoc, *this);
- auto LocDecomp = BaseIndexOffset::match(Loc, *this);
+ auto BaseLocDecomp = BaseIndexOffset::match(Base, *this);
+ auto LocDecomp = BaseIndexOffset::match(LD, *this);
int64_t Offset = 0;
if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset))
Modified: vendor/llvm/dist-release_60/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp
==============================================================================
--- vendor/llvm/dist-release_60/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp Wed Jan 24 20:16:48 2018 (r328361)
+++ vendor/llvm/dist-release_60/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp Wed Jan 24 20:23:48 2018 (r328362)
@@ -21,6 +21,9 @@ using namespace llvm;
bool BaseIndexOffset::equalBaseIndex(BaseIndexOffset &Other,
const SelectionDAG &DAG, int64_t &Off) {
+ // Conservatively fail if we a match failed..
+ if (!Base.getNode() || !Other.Base.getNode())
+ return false;
// Initial Offset difference.
Off = Other.Offset - Offset;
@@ -72,12 +75,28 @@ bool BaseIndexOffset::equalBaseIndex(BaseIndexOffset &
}
/// Parses tree in Ptr for base, index, offset addresses.
-BaseIndexOffset BaseIndexOffset::match(SDValue Ptr, const SelectionDAG &DAG) {
+BaseIndexOffset BaseIndexOffset::match(LSBaseSDNode *N,
+ const SelectionDAG &DAG) {
+ SDValue Ptr = N->getBasePtr();
+
// (((B + I*M) + c)) + c ...
SDValue Base = DAG.getTargetLoweringInfo().unwrapAddress(Ptr);
SDValue Index = SDValue();
int64_t Offset = 0;
bool IsIndexSignExt = false;
+
+ // pre-inc/pre-dec ops are components of EA.
+ if (N->getAddressingMode() == ISD::PRE_INC) {
+ if (auto *C = dyn_cast<ConstantSDNode>(N->getOffset()))
+ Offset += C->getSExtValue();
+ else // If unknown, give up now.
+ return BaseIndexOffset(SDValue(), SDValue(), 0, false);
+ } else if (N->getAddressingMode() == ISD::PRE_DEC) {
+ if (auto *C = dyn_cast<ConstantSDNode>(N->getOffset()))
+ Offset -= C->getSExtValue();
+ else // If unknown, give up now.
+ return BaseIndexOffset(SDValue(), SDValue(), 0, false);
+ }
// Consume constant adds & ors with appropriate masking.
while (Base->getOpcode() == ISD::ADD || Base->getOpcode() == ISD::OR) {
Modified: vendor/llvm/dist-release_60/lib/CodeGen/TargetLoweringBase.cpp
==============================================================================
--- vendor/llvm/dist-release_60/lib/CodeGen/TargetLoweringBase.cpp Wed Jan 24 20:16:48 2018 (r328361)
+++ vendor/llvm/dist-release_60/lib/CodeGen/TargetLoweringBase.cpp Wed Jan 24 20:23:48 2018 (r328362)
@@ -132,9 +132,18 @@ void TargetLoweringBase::InitLibcalls(const Triple &TT
setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
- // Darwin 10 and higher has an optimized __bzero.
- if (!TT.isMacOSX() || !TT.isMacOSXVersionLT(10, 6) || TT.isArch64Bit()) {
- setLibcallName(RTLIB::BZERO, TT.isAArch64() ? "bzero" : "__bzero");
+ // Some darwins have an optimized __bzero/bzero function.
+ switch (TT.getArch()) {
+ case Triple::x86:
+ case Triple::x86_64:
+ if (TT.isMacOSX() && !TT.isMacOSXVersionLT(10, 6))
+ setLibcallName(RTLIB::BZERO, "__bzero");
+ break;
+ case Triple::aarch64:
+ setLibcallName(RTLIB::BZERO, "bzero");
+ break;
+ default:
+ break;
}
if (darwinHasSinCos(TT)) {
Modified: vendor/llvm/dist-release_60/lib/Linker/IRMover.cpp
==============================================================================
--- vendor/llvm/dist-release_60/lib/Linker/IRMover.cpp Wed Jan 24 20:16:48 2018 (r328361)
+++ vendor/llvm/dist-release_60/lib/Linker/IRMover.cpp Wed Jan 24 20:23:48 2018 (r328362)
@@ -954,7 +954,12 @@ Expected<Constant *> IRLinker::linkGlobalValueProto(Gl
NewGV->setLinkage(GlobalValue::InternalLinkage);
Constant *C = NewGV;
- if (DGV)
+ // Only create a bitcast if necessary. In particular, with
+ // DebugTypeODRUniquing we may reach metadata in the destination module
+ // containing a GV from the source module, in which case SGV will be
+ // the same as DGV and NewGV, and TypeMap.get() will assert since it
+ // assumes it is being invoked on a type in the source module.
+ if (DGV && NewGV != SGV)
C = ConstantExpr::getBitCast(NewGV, TypeMap.get(SGV->getType()));
if (DGV && NewGV != DGV) {
Modified: vendor/llvm/dist-release_60/lib/MC/MCCodeView.cpp
==============================================================================
--- vendor/llvm/dist-release_60/lib/MC/MCCodeView.cpp Wed Jan 24 20:16:48 2018 (r328361)
+++ vendor/llvm/dist-release_60/lib/MC/MCCodeView.cpp Wed Jan 24 20:23:48 2018 (r328362)
@@ -76,6 +76,14 @@ bool CodeViewContext::addFile(MCStreamer &OS, unsigned
return true;
}
+// Look up the per-function CodeView info slot for FuncId.
+// Returns nullptr when FuncId is out of range of the Functions table or the
+// slot exists but was never allocated (see recordFunctionId, which resizes
+// Functions on demand).
+MCCVFunctionInfo *CodeViewContext::getCVFunctionInfo(unsigned FuncId) {
+ if (FuncId >= Functions.size())
+ return nullptr;
+ if (Functions[FuncId].isUnallocatedFunctionInfo())
+ return nullptr;
+ return &Functions[FuncId];
+}
+
bool CodeViewContext::recordFunctionId(unsigned FuncId) {
if (FuncId >= Functions.size())
Functions.resize(FuncId + 1);
@@ -245,6 +253,67 @@ void CodeViewContext::emitFileChecksumOffset(MCObjectS
MCSymbolRefExpr::create(Files[Idx].ChecksumTableOffset, OS.getContext());
OS.EmitValueImpl(SRE, 4);
+}
+
+// Record one line entry: append it to the flat MCCVLines vector and track,
+// per function id, the half-open [start, stop) index range of entries that
+// belong to that function in MCCVLineStartStop.
+void CodeViewContext::addLineEntry(const MCCVLineEntry &LineEntry) {
+ size_t Offset = MCCVLines.size();
+ auto I = MCCVLineStartStop.insert(
+ {LineEntry.getFunctionId(), {Offset, Offset + 1}});
+ // Function id already present: only extend its stop index to cover the
+ // entry just appended.
+ if (!I.second)
+ I.first->second.second = Offset + 1;
+ MCCVLines.push_back(LineEntry);
+}
+
+// Build the list of line entries for FuncId's line table. Entries recorded
+// directly against FuncId are copied through unchanged; entries recorded
+// against a different function id that appears in FuncId's InlinedAtMap are
+// replaced by a synthesized entry at the inlined call site, de-duplicated
+// against the previous emitted location so a large inlined call contributes
+// only one row.
+std::vector<MCCVLineEntry>
+CodeViewContext::getFunctionLineEntries(unsigned FuncId) {
+ std::vector<MCCVLineEntry> FilteredLines;
+ auto I = MCCVLineStartStop.find(FuncId);
+ if (I != MCCVLineStartStop.end()) {
+ // NOTE(review): getCVFunctionInfo may return nullptr, and SiteInfo is
+ // dereferenced below on the inlined-location path -- presumably any
+ // function id with recorded line entries also has allocated function
+ // info; confirm.
+ MCCVFunctionInfo *SiteInfo = getCVFunctionInfo(FuncId);
+ for (size_t Idx = I->second.first, End = I->second.second; Idx != End;
+ ++Idx) {
+ unsigned LocationFuncId = MCCVLines[Idx].getFunctionId();
+ if (LocationFuncId == FuncId) {
+ // This was a .cv_loc directly for FuncId, so record it.
+ FilteredLines.push_back(MCCVLines[Idx]);
+ } else {
+ // Check if the current location is inlined in this function. If it is,
+ // synthesize a statement .cv_loc at the original inlined call site.
+ auto I = SiteInfo->InlinedAtMap.find(LocationFuncId);
+ if (I != SiteInfo->InlinedAtMap.end()) {
+ MCCVFunctionInfo::LineInfo &IA = I->second;
+ // Only add the location if it differs from the previous location.
+ // Large inlined calls will have many .cv_loc entries and we only need
+ // one line table entry in the parent function.
+ if (FilteredLines.empty() ||
+ FilteredLines.back().getFileNum() != IA.File ||
+ FilteredLines.back().getLine() != IA.Line ||
+ FilteredLines.back().getColumn() != IA.Col) {
+ FilteredLines.push_back(MCCVLineEntry(
+ MCCVLines[Idx].getLabel(),
+ MCCVLoc(FuncId, IA.File, IA.Line, IA.Col, false, false)));
+ }
+ }
+ }
+ }
+ }
+ return FilteredLines;
+}
+
+// Return the half-open [start, stop) index range into MCCVLines recorded for
+// FuncId by addLineEntry. The {~0ULL, 0} sentinel denotes an empty extent
+// (start > stop, so range-based consumers see no entries).
+std::pair<size_t, size_t> CodeViewContext::getLineExtent(unsigned FuncId) {
+ auto I = MCCVLineStartStop.find(FuncId);
+ // Return an empty extent if there are no cv_locs for this function id.
+ if (I == MCCVLineStartStop.end())
+ return {~0ULL, 0};
+ return I->second;
+}
+
+// Return a non-owning view of MCCVLines covering the half-open extent [L, R).
+// An inverted extent (R <= L, including the getLineExtent empty sentinel) or
+// an out-of-range start yields an empty ArrayRef.
+// NOTE(review): R is not clamped to MCCVLines.size(); presumably callers only
+// pass extents produced by getLineExtent, which are bounded by entries
+// actually appended -- confirm.
+ArrayRef<MCCVLineEntry> CodeViewContext::getLinesForExtent(size_t L, size_t R) {
+ if (R <= L)
+ return None;
+ if (L >= MCCVLines.size())
+ return None;
+ return makeArrayRef(&MCCVLines[L], R - L);
}
void CodeViewContext::emitLineTableForFunction(MCObjectStreamer &OS,
Modified: vendor/llvm/dist-release_60/lib/Target/AArch64/AArch64InstructionSelector.cpp
==============================================================================
--- vendor/llvm/dist-release_60/lib/Target/AArch64/AArch64InstructionSelector.cpp Wed Jan 24 20:16:48 2018 (r328361)
+++ vendor/llvm/dist-release_60/lib/Target/AArch64/AArch64InstructionSelector.cpp Wed Jan 24 20:23:48 2018 (r328362)
@@ -868,6 +868,40 @@ bool AArch64InstructionSelector::select(MachineInstr &
if (OpFlags & AArch64II::MO_GOT) {
I.setDesc(TII.get(AArch64::LOADgot));
I.getOperand(1).setTargetFlags(OpFlags);
+ } else if (TM.getCodeModel() == CodeModel::Large) {
+ // Materialize the global using movz/movk instructions.
+ unsigned MovZDstReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
+ auto InsertPt = std::next(I.getIterator());
+ auto MovZ =
+ BuildMI(MBB, InsertPt, I.getDebugLoc(), TII.get(AArch64::MOVZXi))
+ .addDef(MovZDstReg);
+ MovZ->addOperand(MF, I.getOperand(1));
+ MovZ->getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_G0 |
+ AArch64II::MO_NC);
+ MovZ->addOperand(MF, MachineOperand::CreateImm(0));
+ constrainSelectedInstRegOperands(*MovZ, TII, TRI, RBI);
+
+ auto BuildMovK = [&](unsigned SrcReg, unsigned char Flags,
+ unsigned Offset, unsigned ForceDstReg) {
+ unsigned DstReg =
+ ForceDstReg ? ForceDstReg
+ : MRI.createVirtualRegister(&AArch64::GPR64RegClass);
+ auto MovI = BuildMI(MBB, InsertPt, MovZ->getDebugLoc(),
+ TII.get(AArch64::MOVKXi))
+ .addDef(DstReg)
+ .addReg(SrcReg);
+ MovI->addOperand(MF, MachineOperand::CreateGA(
+ GV, MovZ->getOperand(1).getOffset(), Flags));
+ MovI->addOperand(MF, MachineOperand::CreateImm(Offset));
+ constrainSelectedInstRegOperands(*MovI, TII, TRI, RBI);
+ return DstReg;
+ };
+ unsigned DstReg = BuildMovK(MovZ->getOperand(0).getReg(),
+ AArch64II::MO_G1 | AArch64II::MO_NC, 16, 0);
+ DstReg = BuildMovK(DstReg, AArch64II::MO_G2 | AArch64II::MO_NC, 32, 0);
+ BuildMovK(DstReg, AArch64II::MO_G3, 48, I.getOperand(0).getReg());
+ I.eraseFromParent();
+ return true;
} else {
I.setDesc(TII.get(AArch64::MOVaddr));
I.getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_PAGE);
Modified: vendor/llvm/dist-release_60/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp
==============================================================================
--- vendor/llvm/dist-release_60/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp Wed Jan 24 20:16:48 2018 (r328361)
+++ vendor/llvm/dist-release_60/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp Wed Jan 24 20:23:48 2018 (r328362)
@@ -821,7 +821,6 @@ namespace llvm {
MutableArrayRef<int> NewMask, unsigned Options = None);
OpRef packp(ShuffleMask SM, OpRef Va, OpRef Vb, ResultStack &Results,
MutableArrayRef<int> NewMask);
- OpRef zerous(ShuffleMask SM, OpRef Va, ResultStack &Results);
OpRef vmuxs(ArrayRef<uint8_t> Bytes, OpRef Va, OpRef Vb,
ResultStack &Results);
OpRef vmuxp(ArrayRef<uint8_t> Bytes, OpRef Va, OpRef Vb,
@@ -1137,25 +1136,6 @@ OpRef HvxSelector::packp(ShuffleMask SM, OpRef Va, OpR
}
return concat(Out[0], Out[1], Results);
-}
-
-OpRef HvxSelector::zerous(ShuffleMask SM, OpRef Va, ResultStack &Results) {
- DEBUG_WITH_TYPE("isel", {dbgs() << __func__ << '\n';});
-
- int VecLen = SM.Mask.size();
- SmallVector<uint8_t,128> UsedBytes(VecLen);
- bool HasUnused = false;
- for (int I = 0; I != VecLen; ++I) {
- if (SM.Mask[I] != -1)
- UsedBytes[I] = 0xFF;
- else
- HasUnused = true;
- }
- if (!HasUnused)
- return Va;
- SDValue B = getVectorConstant(UsedBytes, SDLoc(Results.InpNode));
- Results.push(Hexagon::V6_vand, getSingleVT(MVT::i8), {Va, OpRef(B)});
- return OpRef::res(Results.top());
}
OpRef HvxSelector::vmuxs(ArrayRef<uint8_t> Bytes, OpRef Va, OpRef Vb,
Modified: vendor/llvm/dist-release_60/lib/Target/PowerPC/PPCISelLowering.cpp
==============================================================================
--- vendor/llvm/dist-release_60/lib/Target/PowerPC/PPCISelLowering.cpp Wed Jan 24 20:16:48 2018 (r328361)
+++ vendor/llvm/dist-release_60/lib/Target/PowerPC/PPCISelLowering.cpp Wed Jan 24 20:23:48 2018 (r328362)
@@ -142,6 +142,9 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMa
setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
+ // Sub-word ATOMIC_CMP_SWAP need to ensure that the input is zero-extended.
+ setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
+
// PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
for (MVT VT : MVT::integer_valuetypes()) {
setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
@@ -1154,6 +1157,8 @@ const char *PPCTargetLowering::getTargetNodeName(unsig
*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
More information about the svn-src-all
mailing list