git: 8e3e0a4774e1 - stable/14 - Merge llvm-project release/18.x llvmorg-18.1.0-rc3-0-g6c90f8dd5463

From: Dimitry Andric <dim@FreeBSD.org>
Date: Sat, 20 Apr 2024 10:32:34 UTC
The branch stable/14 has been updated by dim:

URL: https://cgit.FreeBSD.org/src/commit/?id=8e3e0a4774e1407ee08eb12b2690fc9b3977a32e

commit 8e3e0a4774e1407ee08eb12b2690fc9b3977a32e
Author:     Dimitry Andric <dim@FreeBSD.org>
AuthorDate: 2024-02-21 10:24:23 +0000
Commit:     Dimitry Andric <dim@FreeBSD.org>
CommitDate: 2024-04-19 21:14:16 +0000

    Merge llvm-project release/18.x llvmorg-18.1.0-rc3-0-g6c90f8dd5463
    
    This updates llvm, clang, compiler-rt, libc++, libunwind, lld, lldb and
    openmp to llvm-project release/18.x llvmorg-18.1.0-rc3-0-g6c90f8dd5463.
    
    PR:             276104
    MFC after:      1 month
    
    (cherry picked from commit 56727255ad47072ec2cc81b4ae728a099697b0e4)
---
 .../clang/lib/Serialization/ASTReaderDecl.cpp      |  10 +-
 .../clang/lib/Serialization/ASTWriter.cpp          |   6 +-
 .../clang/lib/Serialization/ASTWriterDecl.cpp      |  15 +-
 .../compiler-rt/lib/profile/InstrProfilingFile.c   |   3 +
 .../llvm/include/llvm/Analysis/ScalarEvolution.h   |   7 +
 .../llvm/lib/Analysis/ScalarEvolution.cpp          |  62 ++++
 .../llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp    |  37 +-
 .../InstCombine/InstructionCombining.cpp           |   6 +-
 .../Transforms/Utils/ScalarEvolutionExpander.cpp   |  57 +--
 .../llvm/lib/Transforms/Utils/SimplifyIndVar.cpp   |  18 +-
 .../llvm/tools/llvm-objcopy/ObjcopyOptions.cpp     |   2 +
 .../llvm/tools/llvm-objdump/ELFDump.cpp            |   3 +
 .../llvm/tools/llvm-readobj/ELFDumper.cpp          |   1 +
 .../llvm-project/openmp/runtime/src/z_AIX_asm.S    | 410 +++++++++++++++++++++
 lib/clang/include/VCSVersion.inc                   |   6 +-
 lib/clang/include/lld/Common/Version.inc           |   2 +-
 lib/clang/include/llvm/Support/VCSRevision.h       |   2 +-
 17 files changed, 558 insertions(+), 89 deletions(-)

diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp
index 1fadd8039462..321c11e55c14 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp
@@ -800,11 +800,12 @@ void ASTDeclReader::VisitEnumDecl(EnumDecl *ED) {
   BitsUnpacker EnumDeclBits(Record.readInt());
   ED->setNumPositiveBits(EnumDeclBits.getNextBits(/*Width=*/8));
   ED->setNumNegativeBits(EnumDeclBits.getNextBits(/*Width=*/8));
+  bool ShouldSkipCheckingODR = EnumDeclBits.getNextBit();
   ED->setScoped(EnumDeclBits.getNextBit());
   ED->setScopedUsingClassTag(EnumDeclBits.getNextBit());
   ED->setFixed(EnumDeclBits.getNextBit());
 
-  if (!shouldSkipCheckingODR(ED)) {
+  if (!ShouldSkipCheckingODR) {
     ED->setHasODRHash(true);
     ED->ODRHash = Record.readInt();
   }
@@ -1073,6 +1074,7 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
 
   FD->setCachedLinkage((Linkage)FunctionDeclBits.getNextBits(/*Width=*/3));
   FD->setStorageClass((StorageClass)FunctionDeclBits.getNextBits(/*Width=*/3));
+  bool ShouldSkipCheckingODR = FunctionDeclBits.getNextBit();
   FD->setInlineSpecified(FunctionDeclBits.getNextBit());
   FD->setImplicitlyInline(FunctionDeclBits.getNextBit());
   FD->setHasSkippedBody(FunctionDeclBits.getNextBit());
@@ -1102,7 +1104,7 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
   if (FD->isExplicitlyDefaulted())
     FD->setDefaultLoc(readSourceLocation());
 
-  if (!shouldSkipCheckingODR(FD)) {
+  if (!ShouldSkipCheckingODR) {
     FD->ODRHash = Record.readInt();
     FD->setHasODRHash(true);
   }
@@ -1973,6 +1975,8 @@ void ASTDeclReader::ReadCXXDefinitionData(
 
   BitsUnpacker CXXRecordDeclBits = Record.readInt();
 
+  bool ShouldSkipCheckingODR = CXXRecordDeclBits.getNextBit();
+
 #define FIELD(Name, Width, Merge)                                              \
   if (!CXXRecordDeclBits.canGetNextNBits(Width))                         \
     CXXRecordDeclBits.updateValue(Record.readInt());                           \
@@ -1982,7 +1986,7 @@ void ASTDeclReader::ReadCXXDefinitionData(
 #undef FIELD
 
   // We only perform ODR checks for decls not in GMF.
-  if (!shouldSkipCheckingODR(D)) {
+  if (!ShouldSkipCheckingODR) {
     // Note: the caller has deserialized the IsLambda bit already.
     Data.ODRHash = Record.readInt();
     Data.HasODRHash = true;
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp
index 3b79a9238d1a..73018c1170d8 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp
@@ -6010,6 +6010,9 @@ void ASTRecordWriter::AddCXXDefinitionData(const CXXRecordDecl *D) {
 
   BitsPacker DefinitionBits;
 
+  bool ShouldSkipCheckingODR = shouldSkipCheckingODR(D);
+  DefinitionBits.addBit(ShouldSkipCheckingODR);
+
 #define FIELD(Name, Width, Merge)                                              \
   if (!DefinitionBits.canWriteNextNBits(Width)) {                              \
     Record->push_back(DefinitionBits);                                         \
@@ -6023,11 +6026,10 @@ void ASTRecordWriter::AddCXXDefinitionData(const CXXRecordDecl *D) {
   Record->push_back(DefinitionBits);
 
   // We only perform ODR checks for decls not in GMF.
-  if (!shouldSkipCheckingODR(D)) {
+  if (!ShouldSkipCheckingODR)
     // getODRHash will compute the ODRHash if it has not been previously
     // computed.
     Record->push_back(D->getODRHash());
-  }
 
   bool ModulesDebugInfo =
       Writer->Context->getLangOpts().ModulesDebugInfo && !D->isDependentType();
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTWriterDecl.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTWriterDecl.cpp
index f224075643e9..e73800100e3c 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTWriterDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTWriterDecl.cpp
@@ -488,13 +488,15 @@ void ASTDeclWriter::VisitEnumDecl(EnumDecl *D) {
   BitsPacker EnumDeclBits;
   EnumDeclBits.addBits(D->getNumPositiveBits(), /*BitWidth=*/8);
   EnumDeclBits.addBits(D->getNumNegativeBits(), /*BitWidth=*/8);
+  bool ShouldSkipCheckingODR = shouldSkipCheckingODR(D);
+  EnumDeclBits.addBit(ShouldSkipCheckingODR);
   EnumDeclBits.addBit(D->isScoped());
   EnumDeclBits.addBit(D->isScopedUsingClassTag());
   EnumDeclBits.addBit(D->isFixed());
   Record.push_back(EnumDeclBits);
 
   // We only perform ODR checks for decls not in GMF.
-  if (!shouldSkipCheckingODR(D))
+  if (!ShouldSkipCheckingODR)
     Record.push_back(D->getODRHash());
 
   if (MemberSpecializationInfo *MemberInfo = D->getMemberSpecializationInfo()) {
@@ -678,6 +680,8 @@ void ASTDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
   // FIXME: stable encoding
   FunctionDeclBits.addBits(llvm::to_underlying(D->getLinkageInternal()), 3);
   FunctionDeclBits.addBits((uint32_t)D->getStorageClass(), /*BitWidth=*/3);
+  bool ShouldSkipCheckingODR = shouldSkipCheckingODR(D);
+  FunctionDeclBits.addBit(ShouldSkipCheckingODR);
   FunctionDeclBits.addBit(D->isInlineSpecified());
   FunctionDeclBits.addBit(D->isInlined());
   FunctionDeclBits.addBit(D->hasSkippedBody());
@@ -704,7 +708,7 @@ void ASTDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
     Record.AddSourceLocation(D->getDefaultLoc());
 
   // We only perform ODR checks for decls not in GMF.
-  if (!shouldSkipCheckingODR(D))
+  if (!ShouldSkipCheckingODR)
     Record.push_back(D->getODRHash());
 
   if (D->isDefaulted()) {
@@ -2137,12 +2141,13 @@ getFunctionDeclAbbrev(serialization::DeclCode Code) {
   Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 11)); // IDNS
   Abv->Add(BitCodeAbbrevOp(
       BitCodeAbbrevOp::Fixed,
-      27)); // Packed Function Bits: StorageClass, Inline, InlineSpecified,
+      28)); // Packed Function Bits: StorageClass, Inline, InlineSpecified,
             // VirtualAsWritten, Pure, HasInheritedProto, HasWrittenProto,
             // Deleted, Trivial, TrivialForCall, Defaulted, ExplicitlyDefaulted,
             // IsIneligibleOrNotSelected, ImplicitReturnZero, Constexpr,
             // UsesSEHTry, SkippedBody, MultiVersion, LateParsed,
-            // FriendConstraintRefersToEnclosingTemplate, Linkage
+            // FriendConstraintRefersToEnclosingTemplate, Linkage,
+            // ShouldSkipCheckingODR
   Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));    // LocEnd
   Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // ODRHash
   // This Array slurps the rest of the record. Fortunately we want to encode
@@ -2269,7 +2274,7 @@ void ASTWriter::WriteDeclAbbrevs() {
   Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));   // AddTypeRef
   Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));   // IntegerType
   Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));   // getPromotionType
-  Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 19)); // Enum Decl Bits
+  Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 20)); // Enum Decl Bits
   Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));// ODRHash
   Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));   // InstantiatedMembEnum
   // DC
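
The three Serialization hunks above make the AST reader and writer agree explicitly on whether an ODR hash follows in a decl record: the writer packs a ShouldSkipCheckingODR bit into the packed decl bits (growing the abbreviations from 27 to 28 and from 19 to 20 bits), and the reader consumes that bit instead of recomputing shouldSkipCheckingODR() itself. A minimal standalone sketch of that write/read symmetry, using plain uint64_t packing rather than clang's BitsPacker/BitsUnpacker; bit positions and helper names here are illustrative only:

    #include <cassert>
    #include <cstdint>
    #include <optional>
    #include <vector>

    // Writer side: pack the skip-ODR decision together with the other bits,
    // then append the ODR hash only when the decision says not to skip.
    static void writeEnumRecord(std::vector<uint64_t> &Record, bool SkipODR,
                                uint8_t NumPositiveBits, uint32_t ODRHash) {
      uint64_t Bits = 0;
      Bits |= NumPositiveBits;          // bits 0..7 in this sketch
      Bits |= uint64_t(SkipODR) << 8;   // bit 8: ShouldSkipCheckingODR
      Record.push_back(Bits);
      if (!SkipODR)
        Record.push_back(ODRHash);      // present only when the bit is clear
    }

    // Reader side: consume the bit first, then conditionally read the hash.
    static std::optional<uint32_t>
    readEnumRecord(const std::vector<uint64_t> &Record) {
      size_t Idx = 0;
      uint64_t Bits = Record[Idx++];
      bool SkipODR = (Bits >> 8) & 1;
      if (SkipODR)
        return std::nullopt;            // no hash was serialized
      return uint32_t(Record[Idx++]);
    }

    int main() {
      std::vector<uint64_t> Record;
      writeEnumRecord(Record, /*SkipODR=*/false, /*NumPositiveBits=*/5, 0xdeadbeef);
      assert(readEnumRecord(Record) == 0xdeadbeefu);
    }
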
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingFile.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingFile.c
index 867ae73f0d3b..f3b457d786e6 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingFile.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingFile.c
@@ -677,6 +677,7 @@ static void initializeProfileForContinuousMode(void) {
       PROF_ERR("Continuous counter sync mode is enabled, but raw profile is not"
                "page-aligned. CurrentFileOffset = %" PRIu64 ", pagesz = %u.\n",
                (uint64_t)CurrentFileOffset, PageSize);
+      fclose(File);
       return;
     }
     if (writeProfileWithFileObject(Filename, File) != 0) {
@@ -692,6 +693,8 @@ static void initializeProfileForContinuousMode(void) {
 
   if (doMerging()) {
     lprofUnlockFileHandle(File);
+  }
+  if (File != NULL) {
     fclose(File);
   }
 }
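
The compiler-rt hunk plugs a FILE handle leak in continuous mode: the early return for a non-page-aligned offset now closes the file, and the handle is also closed after the merge lock is released. A small sketch of the same close-on-every-exit-path discipline; the helper name and path below are made up for illustration:

    #include <cstdio>

    // Hypothetical helper mirroring the shape of the fixed function: every
    // return path after fopen() must either hand the FILE* off or fclose() it.
    static int writeAlignedProfile(const char *Path, long Offset, long PageSize) {
      FILE *File = std::fopen(Path, "r+b");
      if (!File)
        return -1;
      if (Offset % PageSize != 0) {
        std::fclose(File);   // early-error path: close before bailing out
        return -1;
      }
      // ... write the profile here ...
      std::fclose(File);     // normal path: close once the handle is no longer needed
      return 0;
    }

    int main() { (void)writeAlignedProfile("/tmp/example.profraw", 4096, 4096); }
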
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/ScalarEvolution.h b/contrib/llvm-project/llvm/include/llvm/Analysis/ScalarEvolution.h
index af3ad822e0b0..0880f9c65aa4 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/ScalarEvolution.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/ScalarEvolution.h
@@ -1314,6 +1314,13 @@ public:
   void getPoisonGeneratingValues(SmallPtrSetImpl<const Value *> &Result,
                                  const SCEV *S);
 
+  /// Check whether it is poison-safe to represent the expression S using the
+  /// instruction I. If such a replacement is performed, the poison flags of
+  /// instructions in DropPoisonGeneratingInsts must be dropped.
+  bool canReuseInstruction(
+      const SCEV *S, Instruction *I,
+      SmallVectorImpl<Instruction *> &DropPoisonGeneratingInsts);
+
   class FoldID {
     const SCEV *Op = nullptr;
     const Type *Ty = nullptr;
diff --git a/contrib/llvm-project/llvm/lib/Analysis/ScalarEvolution.cpp b/contrib/llvm-project/llvm/lib/Analysis/ScalarEvolution.cpp
index 2acb45837c48..4b2db80bc1ec 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -4184,6 +4184,68 @@ void ScalarEvolution::getPoisonGeneratingValues(
     Result.insert(SU->getValue());
 }
 
+bool ScalarEvolution::canReuseInstruction(
+    const SCEV *S, Instruction *I,
+    SmallVectorImpl<Instruction *> &DropPoisonGeneratingInsts) {
+  // If the instruction cannot be poison, it's always safe to reuse.
+  if (programUndefinedIfPoison(I))
+    return true;
+
+  // Otherwise, it is possible that I is more poisonous than S. Collect the
+  // poison-contributors of S, and then check whether I has any additional
+  // poison-contributors. Poison that is contributed through poison-generating
+  // flags is handled by dropping those flags instead.
+  SmallPtrSet<const Value *, 8> PoisonVals;
+  getPoisonGeneratingValues(PoisonVals, S);
+
+  SmallVector<Value *> Worklist;
+  SmallPtrSet<Value *, 8> Visited;
+  Worklist.push_back(I);
+  while (!Worklist.empty()) {
+    Value *V = Worklist.pop_back_val();
+    if (!Visited.insert(V).second)
+      continue;
+
+    // Avoid walking large instruction graphs.
+    if (Visited.size() > 16)
+      return false;
+
+    // Either the value can't be poison, or the S would also be poison if it
+    // is.
+    if (PoisonVals.contains(V) || isGuaranteedNotToBePoison(V))
+      continue;
+
+    auto *I = dyn_cast<Instruction>(V);
+    if (!I)
+      return false;
+
+    // Disjoint or instructions are interpreted as adds by SCEV. However, we
+    // can't replace an arbitrary add with disjoint or, even if we drop the
+    // flag. We would need to convert the or into an add.
+    if (auto *PDI = dyn_cast<PossiblyDisjointInst>(I))
+      if (PDI->isDisjoint())
+        return false;
+
+    // FIXME: Ignore vscale, even though it technically could be poison. Do this
+    // because SCEV currently assumes it can't be poison. Remove this special
+    // case once we properly model when vscale can be poison.
+    if (auto *II = dyn_cast<IntrinsicInst>(I);
+        II && II->getIntrinsicID() == Intrinsic::vscale)
+      continue;
+
+    if (canCreatePoison(cast<Operator>(I), /*ConsiderFlagsAndMetadata*/ false))
+      return false;
+
+    // If the instruction can't create poison, we can recurse to its operands.
+    if (I->hasPoisonGeneratingFlagsOrMetadata())
+      DropPoisonGeneratingInsts.push_back(I);
+
+    for (Value *Op : I->operands())
+      Worklist.push_back(Op);
+  }
+  return true;
+}
+
 const SCEV *
 ScalarEvolution::getSequentialMinMaxExpr(SCEVTypes Kind,
                                          SmallVectorImpl<const SCEV *> &Ops) {
diff --git a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index 26ed74108ec3..18a4223d481e 100644
--- a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -1635,7 +1635,8 @@ class BitPermutationSelector {
     default: break;
     case ISD::ROTL:
       if (isa<ConstantSDNode>(V.getOperand(1))) {
-        unsigned RotAmt = V.getConstantOperandVal(1);
+        assert(isPowerOf2_32(NumBits) && "rotl bits should be power of 2!");
+        unsigned RotAmt = V.getConstantOperandVal(1) & (NumBits - 1);
 
         const auto &LHSBits = *getValueBits(V.getOperand(0), NumBits).second;
 
@@ -1648,15 +1649,20 @@ class BitPermutationSelector {
     case ISD::SHL:
     case PPCISD::SHL:
       if (isa<ConstantSDNode>(V.getOperand(1))) {
-        unsigned ShiftAmt = V.getConstantOperandVal(1);
+        // sld takes 7 bits, slw takes 6.
+        unsigned ShiftAmt = V.getConstantOperandVal(1) & ((NumBits << 1) - 1);
 
         const auto &LHSBits = *getValueBits(V.getOperand(0), NumBits).second;
 
-        for (unsigned i = ShiftAmt; i < NumBits; ++i)
-          Bits[i] = LHSBits[i - ShiftAmt];
-
-        for (unsigned i = 0; i < ShiftAmt; ++i)
-          Bits[i] = ValueBit(ValueBit::ConstZero);
+        if (ShiftAmt >= NumBits) {
+          for (unsigned i = 0; i < NumBits; ++i)
+            Bits[i] = ValueBit(ValueBit::ConstZero);
+        } else {
+          for (unsigned i = ShiftAmt; i < NumBits; ++i)
+            Bits[i] = LHSBits[i - ShiftAmt];
+          for (unsigned i = 0; i < ShiftAmt; ++i)
+            Bits[i] = ValueBit(ValueBit::ConstZero);
+        }
 
         return std::make_pair(Interesting = true, &Bits);
       }
@@ -1664,15 +1670,20 @@ class BitPermutationSelector {
     case ISD::SRL:
     case PPCISD::SRL:
       if (isa<ConstantSDNode>(V.getOperand(1))) {
-        unsigned ShiftAmt = V.getConstantOperandVal(1);
+        // srd takes lowest 7 bits, srw takes 6.
+        unsigned ShiftAmt = V.getConstantOperandVal(1) & ((NumBits << 1) - 1);
 
         const auto &LHSBits = *getValueBits(V.getOperand(0), NumBits).second;
 
-        for (unsigned i = 0; i < NumBits - ShiftAmt; ++i)
-          Bits[i] = LHSBits[i + ShiftAmt];
-
-        for (unsigned i = NumBits - ShiftAmt; i < NumBits; ++i)
-          Bits[i] = ValueBit(ValueBit::ConstZero);
+        if (ShiftAmt >= NumBits) {
+          for (unsigned i = 0; i < NumBits; ++i)
+            Bits[i] = ValueBit(ValueBit::ConstZero);
+        } else {
+          for (unsigned i = 0; i < NumBits - ShiftAmt; ++i)
+            Bits[i] = LHSBits[i + ShiftAmt];
+          for (unsigned i = NumBits - ShiftAmt; i < NumBits; ++i)
+            Bits[i] = ValueBit(ValueBit::ConstZero);
+        }
 
         return std::make_pair(Interesting = true, &Bits);
       }
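
The PPCISelDAGToDAG hunks stop trusting that a constant rotate/shift amount is already in range: rotate amounts are reduced modulo NumBits, shift amounts are masked to the 6 bits slw/srw (or 7 bits sld/srd) actually consume, and an out-of-range shift now yields all ConstZero bits instead of indexing LHSBits out of bounds. A tiny rendering of that masking arithmetic, assuming NumBits is 32 or 64 (illustrative, not the selector code):

    #include <cassert>
    #include <cstdint>

    // Mirror of the masking the patch applies before indexing the bit arrays:
    // slw/srw keep 6 bits of the amount, sld/srd keep 7, i.e. (2*NumBits - 1).
    static unsigned maskShiftAmt(unsigned Amt, unsigned NumBits) {
      return Amt & ((NumBits << 1) - 1);
    }

    static uint64_t modelSrl(uint64_t Val, unsigned Amt, unsigned NumBits) {
      unsigned ShiftAmt = maskShiftAmt(Amt, NumBits);
      if (ShiftAmt >= NumBits)
        return 0;               // out-of-range shifts produce all ConstZero bits
      uint64_t Mask = NumBits == 64 ? ~0ull : ((1ull << NumBits) - 1);
      return (Val >> ShiftAmt) & Mask;
    }

    int main() {
      assert(modelSrl(0xffffffffu, 40, 32) == 0);            // 32 <= 40 < 64: zero
      assert(modelSrl(0xffffffffu, 4, 32) == 0x0fffffffu);   // in-range shift
    }
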
diff --git a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 249f4a7710e0..5d207dcfd18d 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2594,10 +2594,10 @@ Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) {
         Value *V;
         if ((has_single_bit(TyAllocSize) &&
              match(GEP.getOperand(1),
-                   m_Exact(m_AShr(m_Value(V),
-                                  m_SpecificInt(countr_zero(TyAllocSize)))))) ||
+                   m_Exact(m_Shr(m_Value(V),
+                                 m_SpecificInt(countr_zero(TyAllocSize)))))) ||
             match(GEP.getOperand(1),
-                  m_Exact(m_SDiv(m_Value(V), m_SpecificInt(TyAllocSize))))) {
+                  m_Exact(m_IDiv(m_Value(V), m_SpecificInt(TyAllocSize))))) {
           GetElementPtrInst *NewGEP = GetElementPtrInst::Create(
               Builder.getInt8Ty(), GEP.getPointerOperand(), V);
           NewGEP->setIsInBounds(GEP.isInBounds());
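
The InstCombine hunk widens the pattern from m_AShr/m_SDiv to m_Shr/m_IDiv, so the fold that rewrites a typed GEP into an i8 GEP also fires when the index comes from an exact lshr or udiv. A hedged sketch of the arithmetic identity the fold relies on for a power-of-two element size (standalone C++20, not the pass code):

    #include <bit>
    #include <cassert>
    #include <cstdint>

    // For a power-of-two element size, an *exact* right shift by
    // log2(size) followed by re-scaling reproduces the original byte
    // offset, which is why "gep T, p, (x exact>> k)" can be rewritten
    // as "gep i8, p, x".
    static uint64_t byteOffsetViaTypedGEP(uint64_t X, uint64_t TyAllocSize) {
      int K = std::countr_zero(TyAllocSize);   // countr_zero(TyAllocSize)
      uint64_t Index = X >> K;                 // exact: no set bits shifted out
      return Index * TyAllocSize;
    }

    int main() {
      // X is a multiple of the element size, so the shift is exact
      // regardless of whether it is an lshr or an ashr.
      uint64_t X = 7 * 8;
      assert(byteOffsetViaTypedGEP(X, /*TyAllocSize=*/8) == X);
    }
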
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp b/contrib/llvm-project/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
index a1d7f0f9ba0f..a3951fdf8a15 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
@@ -1366,61 +1366,6 @@ Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
   return V;
 }
 
-static bool
-canReuseInstruction(ScalarEvolution &SE, const SCEV *S, Instruction *I,
-                    SmallVectorImpl<Instruction *> &DropPoisonGeneratingInsts) {
-  // If the instruction cannot be poison, it's always safe to reuse.
-  if (programUndefinedIfPoison(I))
-    return true;
-
-  // Otherwise, it is possible that I is more poisonous that S. Collect the
-  // poison-contributors of S, and then check whether I has any additional
-  // poison-contributors. Poison that is contributed through poison-generating
-  // flags is handled by dropping those flags instead.
-  SmallPtrSet<const Value *, 8> PoisonVals;
-  SE.getPoisonGeneratingValues(PoisonVals, S);
-
-  SmallVector<Value *> Worklist;
-  SmallPtrSet<Value *, 8> Visited;
-  Worklist.push_back(I);
-  while (!Worklist.empty()) {
-    Value *V = Worklist.pop_back_val();
-    if (!Visited.insert(V).second)
-      continue;
-
-    // Avoid walking large instruction graphs.
-    if (Visited.size() > 16)
-      return false;
-
-    // Either the value can't be poison, or the S would also be poison if it
-    // is.
-    if (PoisonVals.contains(V) || isGuaranteedNotToBePoison(V))
-      continue;
-
-    auto *I = dyn_cast<Instruction>(V);
-    if (!I)
-      return false;
-
-    // FIXME: Ignore vscale, even though it technically could be poison. Do this
-    // because SCEV currently assumes it can't be poison. Remove this special
-    // case once we proper model when vscale can be poison.
-    if (auto *II = dyn_cast<IntrinsicInst>(I);
-        II && II->getIntrinsicID() == Intrinsic::vscale)
-      continue;
-
-    if (canCreatePoison(cast<Operator>(I), /*ConsiderFlagsAndMetadata*/ false))
-      return false;
-
-    // If the instruction can't create poison, we can recurse to its operands.
-    if (I->hasPoisonGeneratingFlagsOrMetadata())
-      DropPoisonGeneratingInsts.push_back(I);
-
-    for (Value *Op : I->operands())
-      Worklist.push_back(Op);
-  }
-  return true;
-}
-
 Value *SCEVExpander::FindValueInExprValueMap(
     const SCEV *S, const Instruction *InsertPt,
     SmallVectorImpl<Instruction *> &DropPoisonGeneratingInsts) {
@@ -1448,7 +1393,7 @@ Value *SCEVExpander::FindValueInExprValueMap(
       continue;
 
     // Make sure reusing the instruction is poison-safe.
-    if (canReuseInstruction(SE, S, EntInst, DropPoisonGeneratingInsts))
+    if (SE.canReuseInstruction(S, EntInst, DropPoisonGeneratingInsts))
       return V;
     DropPoisonGeneratingInsts.clear();
   }
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp b/contrib/llvm-project/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
index 0ed3324a27b6..1b142f14d811 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
@@ -16,6 +16,7 @@
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/ValueTracking.h"
 #include "llvm/IR/Dominators.h"
 #include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/Instructions.h"
@@ -713,8 +714,11 @@ bool SimplifyIndvar::replaceFloatIVWithIntegerIV(Instruction *UseInst) {
 bool SimplifyIndvar::eliminateIdentitySCEV(Instruction *UseInst,
                                            Instruction *IVOperand) {
   if (!SE->isSCEVable(UseInst->getType()) ||
-      (UseInst->getType() != IVOperand->getType()) ||
-      (SE->getSCEV(UseInst) != SE->getSCEV(IVOperand)))
+      UseInst->getType() != IVOperand->getType())
+    return false;
+
+  const SCEV *UseSCEV = SE->getSCEV(UseInst);
+  if (UseSCEV != SE->getSCEV(IVOperand))
     return false;
 
   // getSCEV(X) == getSCEV(Y) does not guarantee that X and Y are related in the
@@ -742,6 +746,16 @@ bool SimplifyIndvar::eliminateIdentitySCEV(Instruction *UseInst,
   if (!LI->replacementPreservesLCSSAForm(UseInst, IVOperand))
     return false;
 
+  // Make sure the operand is not more poisonous than the instruction.
+  if (!impliesPoison(IVOperand, UseInst)) {
+    SmallVector<Instruction *> DropPoisonGeneratingInsts;
+    if (!SE->canReuseInstruction(UseSCEV, IVOperand, DropPoisonGeneratingInsts))
+      return false;
+
+    for (Instruction *I : DropPoisonGeneratingInsts)
+      I->dropPoisonGeneratingFlagsAndMetadata();
+  }
+
   LLVM_DEBUG(dbgs() << "INDVARS: Eliminated identity: " << *UseInst << '\n');
 
   SE->forgetValue(UseInst);
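
eliminateIdentitySCEV now guards the replacement against introducing poison: it first tries the cheap impliesPoison check and otherwise defers to the new ScalarEvolution::canReuseInstruction, dropping the poison-generating flags it reports before rewriting. A toy model of why that flag-dropping matters, with poison represented as an empty optional (purely illustrative; the real semantics live in LLVM's ValueTracking):

    #include <cassert>
    #include <cstdint>
    #include <optional>

    // Toy model: "add nuw" is poison on unsigned wrap, a plain "add" is not.
    static std::optional<uint32_t> addNUW(uint32_t A, uint32_t B) {
      uint64_t Wide = uint64_t(A) + B;
      if (Wide > UINT32_MAX)
        return std::nullopt;          // poison: the nuw promise was violated
      return uint32_t(Wide);
    }

    static std::optional<uint32_t> addPlain(uint32_t A, uint32_t B) {
      return uint32_t(A + B);         // wraps, but is a well-defined value
    }

    int main() {
      // Replacing a plain add with a SCEV-equivalent "add nuw" would introduce
      // poison on wrapping inputs, so the flag has to be dropped first.
      assert(!addNUW(UINT32_MAX, 1).has_value());
      assert(addPlain(UINT32_MAX, 1) == 0u);
    }
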
diff --git a/contrib/llvm-project/llvm/tools/llvm-objcopy/ObjcopyOptions.cpp b/contrib/llvm-project/llvm/tools/llvm-objcopy/ObjcopyOptions.cpp
index f15307181fad..f63e5c61e802 100644
--- a/contrib/llvm-project/llvm/tools/llvm-objcopy/ObjcopyOptions.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-objcopy/ObjcopyOptions.cpp
@@ -299,6 +299,8 @@ static const StringMap<MachineInfo> TargetMap{
     // LoongArch
     {"elf32-loongarch", {ELF::EM_LOONGARCH, false, true}},
     {"elf64-loongarch", {ELF::EM_LOONGARCH, true, true}},
+    // SystemZ
+    {"elf64-s390", {ELF::EM_S390, true, false}},
 };
 
 static Expected<TargetInfo>
diff --git a/contrib/llvm-project/llvm/tools/llvm-objdump/ELFDump.cpp b/contrib/llvm-project/llvm/tools/llvm-objdump/ELFDump.cpp
index 34861ee92128..fda99bd6d33e 100644
--- a/contrib/llvm-project/llvm/tools/llvm-objdump/ELFDump.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-objdump/ELFDump.cpp
@@ -291,6 +291,9 @@ template <class ELFT> void ELFDumper<ELFT>::printProgramHeaders() {
     case ELF::PT_OPENBSD_RANDOMIZE:
       outs() << "OPENBSD_RANDOMIZE ";
       break;
+    case ELF::PT_OPENBSD_SYSCALLS:
+      outs() << "OPENBSD_SYSCALLS ";
+      break;
     case ELF::PT_OPENBSD_WXNEEDED:
       outs() << "OPENBSD_WXNEEDED ";
       break;
diff --git a/contrib/llvm-project/llvm/tools/llvm-readobj/ELFDumper.cpp b/contrib/llvm-project/llvm/tools/llvm-readobj/ELFDumper.cpp
index f369a63add11..387124ad53e4 100644
--- a/contrib/llvm-project/llvm/tools/llvm-readobj/ELFDumper.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-readobj/ELFDumper.cpp
@@ -1478,6 +1478,7 @@ static StringRef segmentTypeToString(unsigned Arch, unsigned Type) {
     LLVM_READOBJ_ENUM_CASE(ELF, PT_OPENBSD_RANDOMIZE);
     LLVM_READOBJ_ENUM_CASE(ELF, PT_OPENBSD_WXNEEDED);
     LLVM_READOBJ_ENUM_CASE(ELF, PT_OPENBSD_NOBTCFI);
+    LLVM_READOBJ_ENUM_CASE(ELF, PT_OPENBSD_SYSCALLS);
     LLVM_READOBJ_ENUM_CASE(ELF, PT_OPENBSD_BOOTDATA);
   default:
     return "";
diff --git a/contrib/llvm-project/openmp/runtime/src/z_AIX_asm.S b/contrib/llvm-project/openmp/runtime/src/z_AIX_asm.S
new file mode 100644
index 000000000000..d711fcb7a785
--- /dev/null
+++ b/contrib/llvm-project/openmp/runtime/src/z_AIX_asm.S
@@ -0,0 +1,410 @@
+//  z_AIX_asm.S:  - microtasking routines specifically
+//                  written for Power platforms running AIX OS
+
+//
+////===----------------------------------------------------------------------===//
+////
+//// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+//// See https://llvm.org/LICENSE.txt for license information.
+//// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+////
+////===----------------------------------------------------------------------===//
+//
+
+// -----------------------------------------------------------------------
+// macros
+// -----------------------------------------------------------------------
+
+#include "kmp_config.h"
+
+#if KMP_OS_AIX
+//------------------------------------------------------------------------
+// int
+// __kmp_invoke_microtask( void (*pkfn) (int *gtid, int *tid, ...),
+//                         int gtid, int tid,
+//                         int argc, void *p_argv[]
+// #if OMPT_SUPPORT
+//                         ,
+//                         void **exit_frame_ptr
+// #endif
+//                       ) {
+// #if OMPT_SUPPORT
+//   *exit_frame_ptr = OMPT_GET_FRAME_ADDRESS(0);
+// #endif
+//
+//   (*pkfn)( & gtid, & tid, p_argv[0], ... );
+//
+// // FIXME: This is done at call-site and can be removed here.
+// #if OMPT_SUPPORT
+//   *exit_frame_ptr = 0;
+// #endif
+//
+//   return 1;
+// }
+//
+// parameters:
+//   r3: pkfn
+//   r4: gtid
+//   r5: tid
+//   r6: argc
+//   r7: p_argv
+//   r8: &exit_frame
+//
+// return:  r3 (always 1/TRUE)
+//
+
+#if KMP_ARCH_PPC64_XCOFF
+
+    .globl  __kmp_invoke_microtask[DS]
+    .globl  .__kmp_invoke_microtask
+    .align  4
+    .csect __kmp_invoke_microtask[DS],3
+    .vbyte  8, .__kmp_invoke_microtask
+    .vbyte  8, TOC[TC0]
+    .vbyte  8, 0
+    .csect .text[PR],2
+    .machine "pwr7"
+.__kmp_invoke_microtask:
+
+
+// -- Begin __kmp_invoke_microtask
+// mark_begin;
+
+// We need to allocate a stack frame large enough to hold all of the parameters
+// on the stack for the microtask plus what this function needs. That's 48
+// bytes under the XCOFF64 ABI, plus max(64, 8*(2 + argc)) for
+// the parameters to the microtask (gtid, tid, argc elements of p_argv),
+// plus 8 bytes to store the values of r4 and r5, and 8 bytes to store r31.
+// With OMP-T support, we need an additional 8 bytes to save r30 to hold
+// a copy of r8.
+// Stack offsets relative to stack pointer:
+//   r31: -8, r30: -16, gtid: -20, tid: -24
+
+    mflr 0
+    std 31, -8(1)      # Save r31 to the stack
+    std 0, 16(1)       # Save LR to the linkage area
+
+// This is unusual because normally we'd set r31 equal to r1 after the stack
+// frame is established. In this case, however, we need to dynamically compute
+// the stack frame size, and so we keep a direct copy of r1 to access our
+// register save areas and restore the r1 value before returning.
+    mr 31, 1
+
+// Compute the size of the "argc" portion of the parameter save area.
+// The parameter save area is always at least 64 bytes long (i.e. 8 regs)
+// The microtask has (2 + argc) parameters, so if argc <= 6, we need to
+// allocate 8*6 bytes, not 8*argc.
+    li 0, 6
+    cmpwi 0, 6, 6
+    iselgt 0, 6, 0     # r0 = (argc > 6)? argc : 6
+    sldi 0, 0, 3       # r0 = 8 * max(argc, 6)
+
+// Compute the size necessary for the local stack frame.
+// 88 = 48 + 4 (for r4) + 4 (for r5) + 8 (for r31) + 8 (for OMP-T r30) +
+//      8 (parameter gtid) + 8 (parameter tid)
+    li 12, 88
+    add 12, 0, 12
+    neg 12, 12
+
+// We need to make sure that the stack frame stays aligned (to 16 bytes).
+    li 0, -16
+    and 12, 0, 12
+
+// Establish the local stack frame.
+    stdux 1, 1, 12
+
+#if OMPT_SUPPORT
+    std 30, -16(31)    # Save r30 to the stack
+    std 1, 0(8)
+    mr 30, 8
+#endif
+
+// Store gtid and tid to the stack because they're passed by reference to the microtask.
+    stw 4, -20(31)     # Save gtid to the stack
+    stw 5, -24(31)     # Save tid to the stack
+
+    mr 12, 6           # r12 = argc
+    mr 4, 7            # r4 = p_argv
+
+    cmpwi 0, 12, 1
+    blt 0, .Lcall      # if (argc < 1) goto .Lcall
+
+    ld 5, 0(4)         # r5 = p_argv[0]
+
+    cmpwi 0, 12, 2
+    blt 0, .Lcall      # if (argc < 2) goto .Lcall
+
+    ld 6, 8(4)         # r6 = p_argv[1]
+
+    cmpwi 0, 12, 3
+    blt 0, .Lcall      # if (argc < 3) goto .Lcall
+
+    ld 7, 16(4)        # r7 = p_argv[2]
+
+    cmpwi 0, 12, 4
+    blt 0, .Lcall      # if (argc < 4) goto .Lcall
+
+    ld 8, 24(4)        # r8 = p_argv[3]
+
+    cmpwi 0, 12, 5
+    blt 0, .Lcall      # if (argc < 5) goto .Lcall
+
+    ld 9, 32(4)        # r9 = p_argv[4]
+
+    cmpwi 0, 12, 6
+    blt 0, .Lcall      # if (argc < 6) goto .Lcall
+
+    ld 10, 40(4)       # r10 = p_argv[5]
+
+    cmpwi 0, 12, 7
+    blt 0, .Lcall      # if (argc < 7) goto .Lcall
+
+// There are more than 6 microtask parameters, so we need to store the
+// remainder to the stack.
+    addi 12, 12, -6    # argc -= 6
+    mtctr 12
+
+// These are set to 8 bytes before the first desired store address (we're using
+// pre-increment loads and stores in the loop below). The parameter save area
+// for the microtask begins 48 + 8*8 == 112 bytes above r1 for XCOFF64.
+    addi 4, 4, 40      # p_argv = p_argv + 5
+                       # (i.e. skip the 5 elements we already processed)
+    addi 12, 1, 104    # r12 = stack offset (112 - 8)
+
+.Lnext:
+    ldu 0, 8(4)
+    stdu 0, 8(12)
+    bdnz .Lnext
+
+.Lcall:
+    std 2, 40(1)     # Save the TOC pointer to the linkage area
+// Load the actual function address from the function descriptor.
+    ld 12, 0(3)      # Function address
+    ld 2, 8(3)       # TOC pointer
+    ld 11, 16(3)     # Environment pointer
+
+    addi 3, 31, -20  # r3 = &gtid
+    addi 4, 31, -24  # r4 = &tid
+
+    mtctr 12         # CTR = function address
+    bctrl            # Branch to CTR
+    ld 2, 40(1)      # Restore TOC pointer from linkage area
+
+#if OMPT_SUPPORT
+    li 3, 0
+    std 3, 0(30)
+#endif
+
+    li 3, 1
+
+#if OMPT_SUPPORT
+    ld 30, -16(31)   # Restore r30 from the saved value on the stack
+#endif
+
+    mr 1, 31
+    ld 31, -8(1)     # Restore r31 from the saved value on the stack
+    ld 0, 16(1)
+    mtlr 0           # Restore LR from the linkage area
+    blr              # Branch to LR
+
+#else  // KMP_ARCH_PPC_XCOFF
+
+    .globl  __kmp_invoke_microtask[DS]
+    .globl  .__kmp_invoke_microtask
+    .align  4
+    .csect __kmp_invoke_microtask[DS],2
+    .vbyte  4, .__kmp_invoke_microtask
+    .vbyte  4, TOC[TC0]
+    .vbyte  4, 0
+    .csect .text[PR],2
+    .machine "pwr7"
+.__kmp_invoke_microtask:
+
+
+// -- Begin __kmp_invoke_microtask
+// mark_begin;
+
+// We need to allocate a stack frame large enough to hold all of the parameters
+// on the stack for the microtask plus what this function needs. That's 24
+// bytes under the XCOFF ABI, plus max(32, 8*(2 + argc)) for
+// the parameters to the microtask (gtid, tid, argc elements of p_argv),
+// plus 8 bytes to store the values of r4 and r5, and 4 bytes to store r31.
+// With OMP-T support, we need an additional 4 bytes to save r30 to hold
+// a copy of r8.
+// Stack offsets relative to stack pointer:
+//   r31: -4, r30: -8, gtid: -12, tid: -16
+
+    mflr 0
+    stw 31, -4(1)      # Save r31 to the stack
+    stw 0, 8(1)        # Save LR to the linkage area
+
+// This is unusual because normally we'd set r31 equal to r1 after the stack
+// frame is established. In this case, however, we need to dynamically compute
+// the stack frame size, and so we keep a direct copy of r1 to access our
+// register save areas and restore the r1 value before returning.
+    mr 31, 1
+
+// Compute the size of the "argc" portion of the parameter save area.
+// The parameter save area is always at least 32 bytes long (i.e. 8 regs)
+// The microtask has (2 + argc) parameters, so if argc <= 6, we need to
+// allocate 4*6 bytes, not 4*argc.
+    li 0, 6
+    cmpwi 0, 6, 6
+    iselgt 0, 6, 0     # r0 = (argc > 6)? argc : 6
+    slwi 0, 0, 2       # r0 = 4 * max(argc, 6)
+
+// Compute the size necessary for the local stack frame.
+// 56 = 32 + 4 (for r4) + 4 (for r5) + 4 (for r31) + 4 (for OMP-T r30) +
+//      4 (parameter gtid) + 4 (parameter tid)
+    li 12, 56
+    add 12, 0, 12
+    neg 12, 12
+
+// We need to make sure that the stack frame stays aligned (to 16 bytes).
+    li 0, -16
+    and 12, 0, 12
+
+// Establish the local stack frame.
+    stwux 1, 1, 12
+
+#if OMPT_SUPPORT
+    stw 30, -8(31)     # Save r30 to the stack
+    stw 1, 0(8)
+    mr 30, 8
+#endif
+
+// Store gtid and tid to the stack because they're passed by reference to the microtask.
+    stw 4, -12(31)     # Save gtid to the stack
+    stw 5, -16(31)     # Save tid to the stack
+
+    mr 12, 6           # r12 = argc
+    mr 4, 7            # r4 = p_argv
+
+    cmpwi 0, 12, 1
+    blt 0, .Lcall      # if (argc < 1) goto .Lcall
+
+    lwz 5, 0(4)        # r5 = p_argv[0]
+
+    cmpwi 0, 12, 2
+    blt 0, .Lcall      # if (argc < 2) goto .Lcall
+
+    lwz 6, 4(4)        # r6 = p_argv[1]
+
+    cmpwi 0, 12, 3
+    blt 0, .Lcall      # if (argc < 3) goto .Lcall
+
+    lwz 7, 8(4)        # r7 = p_argv[2]
+
+    cmpwi 0, 12, 4
+    blt 0, .Lcall      # if (argc < 4) goto .Lcall
+
+    lwz 8, 12(4)       # r8 = p_argv[3]
+
+    cmpwi 0, 12, 5
+    blt 0, .Lcall      # if (argc < 5) goto .Lcall
+
+    lwz 9, 16(4)       # r9 = p_argv[4]
+
+    cmpwi 0, 12, 6
+    blt 0, .Lcall      # if (argc < 6) goto .Lcall
+
+    lwz 10, 20(4)      # r10 = p_argv[5]
+
+    cmpwi 0, 12, 7
+    blt 0, .Lcall      # if (argc < 7) goto .Lcall
+
+// There are more than 6 microtask parameters, so we need to store the
+// remainder to the stack.
+    addi 12, 12, -6    # argc -= 6
+    mtctr 12
+
+// These are set to 4 bytes before the first desired store address (we're using
+// pre-increment loads and stores in the loop below). The parameter save area
+// for the microtask begins 24 + 4*8 == 56 bytes above r1 for XCOFF.
+    addi 4, 4, 20      # p_argv = p_argv + 5
+                       # (i.e. skip the 5 elements we already processed)
+    addi 12, 1, 52     # r12 = stack offset (56 - 4)
+
+.Lnext:
+    lwzu 0, 4(4)
+    stwu 0, 4(12)
+    bdnz .Lnext
+
+.Lcall:
+    stw 2, 20(1)     # Save the TOC pointer to the linkage area
+// Load the actual function address from the function descriptor.
+    lwz 12, 0(3)     # Function address
+    lwz 2, 4(3)      # TOC pointer
+    lwz 11, 8(3)     # Environment pointer
+
+    addi 3, 31, -12  # r3 = &gtid
+    addi 4, 31, -16  # r4 = &tid
+
+    mtctr 12         # CTR = function address
+    bctrl            # Branch to CTR
+    lwz 2, 20(1)     # Restore TOC pointer from linkage area
+
+#if OMPT_SUPPORT
+    li 3, 0
+    stw 3, 0(30)
+#endif
+
+    li 3, 1
+
+#if OMPT_SUPPORT
+    lwz 30, -8(31)   # Restore r30 from the saved value on the stack
+#endif
+
+    mr 1, 31
+    lwz 31, -4(1)    # Restore r31 from the saved value on the stack
+    lwz 0, 8(1)
+    mtlr 0           # Restore LR from the linkage area
+    blr              # Branch to LR
+
+#endif // KMP_ARCH_PPC64_XCOFF
+
+.Lfunc_end0:
+    .vbyte  4, 0x00000000           # Traceback table begin
+    .byte   0x00                    # Version = 0
+    .byte   0x09                    # Language = CPlusPlus
+    .byte   0x20                    # -IsGlobaLinkage, -IsOutOfLineEpilogOrPrologue
+                                    # +HasTraceBackTableOffset, -IsInternalProcedure
+                                    # -HasControlledStorage, -IsTOCless
+                                    # -IsFloatingPointPresent
+                                    # -IsFloatingPointOperationLogOrAbortEnabled
+    .byte   0x61                    # -IsInterruptHandler, +IsFunctionNamePresent, +IsAllocaUsed
+                                    # OnConditionDirective = 0, -IsCRSaved, +IsLRSaved
+    .byte   0x80                    # +IsBackChainStored, -IsFixup, NumOfFPRsSaved = 0
+#if OMPT_SUPPORT
+    .byte   0x02                    # -HasExtensionTable, -HasVectorInfo, NumOfGPRsSaved = 2
+    .byte   0x06                    # NumberOfFixedParms = 6
+#else
+    .byte   0x01                    # -HasExtensionTable, -HasVectorInfo, NumOfGPRsSaved = 1
+    .byte   0x05                    # NumberOfFixedParms = 5
+#endif
+    .byte   0x01                    # NumberOfFPParms = 0, +HasParmsOnStack
+    .vbyte  4, 0x00000000           # Parameter type = i, i, i, i, i
+    .vbyte  4, .Lfunc_end0-.__kmp_invoke_microtask # Function size
+    .vbyte  2, 0x0016               # Function name len = 22
+    .byte   "__kmp_invoke_microtask" # Function Name
+    .byte   0x1f                    # AllocaRegister = 31
+                                    # -- End function
+
+// -- End  __kmp_invoke_microtask
+
+// Support for unnamed common blocks.
+
+    .comm .gomp_critical_user_, 32, 3
+#if KMP_ARCH_PPC64_XCOFF
+    .csect __kmp_unnamed_critical_addr[RW],3
+#else
+    .csect __kmp_unnamed_critical_addr[RW],2
+#endif
+    .globl __kmp_unnamed_critical_addr[RW]
+    .ptr .gomp_critical_user_
+
+// -- End unnamed common block
+
+    .toc
+
+#endif // KMP_OS_AIX
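
The new AIX stubs size __kmp_invoke_microtask's frame dynamically: a fixed 88 (XCOFF64) or 56 (XCOFF32) bytes plus one parameter slot per microtask argument (with a minimum of six), rounded up to 16-byte alignment via the negate-and-mask sequence. A short C++ rendering of that arithmetic, using the constants from the comments above:

    #include <cassert>

    // Frame size computed by the new __kmp_invoke_microtask stubs:
    //   fixed bytes + slotSize * max(argc, 6), rounded up to a multiple of 16.
    static long frameSize(long Argc, bool XCOFF64) {
      long SlotSize = XCOFF64 ? 8 : 4;
      long Fixed = XCOFF64 ? 88 : 56;
      long Size = Fixed + SlotSize * (Argc > 6 ? Argc : 6);
      // The asm negates and masks with -16 before stdux/stwux; on the
      // positive side that is a round-up to 16-byte alignment.
      return (Size + 15) & ~15L;
    }

    int main() {
      assert(frameSize(/*Argc=*/3, /*XCOFF64=*/true) == 144);   // 88 + 48 -> 144
      assert(frameSize(/*Argc=*/10, /*XCOFF64=*/false) == 96);  // 56 + 40 -> 96
    }
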
diff --git a/lib/clang/include/VCSVersion.inc b/lib/clang/include/VCSVersion.inc
index fd0a56bce1b7..1dd3c861dbb2 100644
--- a/lib/clang/include/VCSVersion.inc
+++ b/lib/clang/include/VCSVersion.inc
@@ -1,8 +1,8 @@
-#define LLVM_REVISION "llvmorg-18.1.0-rc2-53-gc7b0a6ecd442"
+#define LLVM_REVISION "llvmorg-18.1.0-rc3-0-g6c90f8dd5463"
 #define LLVM_REPOSITORY "https://github.com/llvm/llvm-project.git"
 
-#define CLANG_REVISION "llvmorg-18.1.0-rc2-53-gc7b0a6ecd442"
+#define CLANG_REVISION "llvmorg-18.1.0-rc3-0-g6c90f8dd5463"
 #define CLANG_REPOSITORY "https://github.com/llvm/llvm-project.git"
 
-#define LLDB_REVISION "llvmorg-18.1.0-rc2-53-gc7b0a6ecd442"
+#define LLDB_REVISION "llvmorg-18.1.0-rc3-0-g6c90f8dd5463"
 #define LLDB_REPOSITORY "https://github.com/llvm/llvm-project.git"
diff --git a/lib/clang/include/lld/Common/Version.inc b/lib/clang/include/lld/Common/Version.inc
index 153e86106f12..002ec1eecc00 100644
--- a/lib/clang/include/lld/Common/Version.inc
+++ b/lib/clang/include/lld/Common/Version.inc
@@ -1,4 +1,4 @@
 // Local identifier in __FreeBSD_version style
 #define LLD_FREEBSD_VERSION 1400006
 
-#define LLD_VERSION_STRING "18.1.0 (FreeBSD llvmorg-18.1.0-rc2-53-gc7b0a6ecd442-" __XSTRING(LLD_FREEBSD_VERSION) ")"
+#define LLD_VERSION_STRING "18.1.0 (FreeBSD llvmorg-18.1.0-rc3-0-g6c90f8dd5463-" __XSTRING(LLD_FREEBSD_VERSION) ")"
diff --git a/lib/clang/include/llvm/Support/VCSRevision.h b/lib/clang/include/llvm/Support/VCSRevision.h
index d06ac507357e..6950d566ff2f 100644
--- a/lib/clang/include/llvm/Support/VCSRevision.h
+++ b/lib/clang/include/llvm/Support/VCSRevision.h
@@ -1,2 +1,2 @@
-#define LLVM_REVISION "llvmorg-18.1.0-rc2-53-gc7b0a6ecd442"
+#define LLVM_REVISION "llvmorg-18.1.0-rc3-0-g6c90f8dd5463"
 #define LLVM_REPOSITORY "https://github.com/llvm/llvm-project.git"