Merge pull request #12 from llvm/main
[pull] main from llvm:main
devkadirselcuk authored Jul 4, 2021
2 parents f2af002 + 287d39d commit ba5501b
Showing 38 changed files with 201 additions and 125 deletions.
1 change: 1 addition & 0 deletions clang-tools-extra/clangd/support/Threading.h
@@ -12,6 +12,7 @@
 #include "support/Context.h"
 #include "llvm/ADT/FunctionExtras.h"
 #include "llvm/ADT/Twine.h"
+#include <atomic>
 #include <cassert>
 #include <condition_variable>
 #include <future>
10 changes: 5 additions & 5 deletions clang/lib/CodeGen/CGBuiltin.cpp
@@ -11746,14 +11746,14 @@ static Value *EmitX86MaskedStore(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
 static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
                                 Align Alignment) {
   // Cast the pointer to right type.
-  Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
-                                         llvm::PointerType::getUnqual(Ops[1]->getType()));
+  llvm::Type *Ty = Ops[1]->getType();
+  Value *Ptr =
+      CGF.Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));

   Value *MaskVec = getMaskVecValue(
-      CGF, Ops[2],
-      cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements());
+      CGF, Ops[2], cast<llvm::FixedVectorType>(Ty)->getNumElements());

-  return CGF.Builder.CreateMaskedLoad(Ptr, Alignment, MaskVec, Ops[1]);
+  return CGF.Builder.CreateMaskedLoad(Ty, Ptr, Alignment, MaskVec, Ops[1]);
 }

 static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
7 changes: 6 additions & 1 deletion clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
@@ -193,6 +193,11 @@ class PaddingChecker : public Checker<check::ASTDecl<TranslationUnitDecl>> {
     CharUnits PaddingSum;
     CharUnits Offset = ASTContext.toCharUnitsFromBits(RL.getFieldOffset(0));
     for (const FieldDecl *FD : RD->fields()) {
+      // Skip any field that is a zero-size subobject, i.e. one marked with
+      // [[no_unique_address]] or an empty bitfield, because its address may
+      // coincide with the addresses of the other fields.
+      if (FD->isZeroSize(ASTContext))
+        continue;
      // This checker only cares about the padded size of the
      // field, and not the data size. If the field is a record
      // with tail padding, then we won't put that number in our
@@ -249,7 +254,7 @@ class PaddingChecker : public Checker<check::ASTDecl<TranslationUnitDecl>> {
       RetVal.Field = FD;
       auto &Ctx = FD->getASTContext();
       auto Info = Ctx.getTypeInfoInChars(FD->getType());
-      RetVal.Size = Info.Width;
+      RetVal.Size = FD->isZeroSize(Ctx) ? CharUnits::Zero() : Info.Width;
       RetVal.Align = Info.Align;
       assert(llvm::isPowerOf2_64(RetVal.Align.getQuantity()));
       if (auto Max = FD->getMaxAlignment())
30 changes: 30 additions & 0 deletions clang/test/Analysis/padding_no_unique_address.cpp
@@ -0,0 +1,30 @@
+// RUN: %clang_analyze_cc1 -std=c++14 -triple x86_64-linux-gnu -analyzer-checker=optin.performance -analyzer-config optin.performance.Padding:AllowedPad=2 -verify %s
+
+class Empty {}; // no-warning
+
+// expected-warning@+1{{Excessive padding in 'struct NoUniqueAddressWarn1' (6 padding}}
+struct NoUniqueAddressWarn1 {
+  char c1;
+  [[no_unique_address]] Empty empty;
+  int i;
+  char c2;
+};
+
+// expected-warning@+1{{Excessive padding in 'struct NoUniqueAddressWarn2' (6 padding}}
+struct NoUniqueAddressWarn2 {
+  char c1;
+  [[no_unique_address]] Empty e1, e2;
+  int i;
+  char c2;
+};
+
+struct NoUniqueAddressNoWarn1 {
+  char c1;
+  [[no_unique_address]] Empty empty;
+  char c2;
+};
+
+struct NoUniqueAddressNoWarn2 {
+  char c1;
+  [[no_unique_address]] Empty e1, e2;
+};
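A rough sizeof illustration of what the checker measures in the tests above (not part of the commit; the struct names here are hypothetical, and the numbers assume the same x86_64 ABI the RUN line targets, with [[no_unique_address]] accepted as a Clang extension pre-C++20):

    #include <cstdio>

    struct Empty {};

    // Mirrors NoUniqueAddressWarn1: c1 at offset 0, 3 bytes of padding, i at 4,
    // c2 at 8, 3 bytes of tail padding -- 6 padding bytes, sizeof == 12.
    struct Warn { char c1; [[no_unique_address]] Empty e; int i; char c2; };

    // Sorting members by alignment shrinks the padding to 2 tail bytes,
    // within the AllowedPad=2 budget above: sizeof == 8.
    struct Reordered { int i; char c1; char c2; [[no_unique_address]] Empty e; };

    int main() {
      std::printf("%zu %zu\n", sizeof(Warn), sizeof(Reordered)); // expect: 12 8
    }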
1 change: 1 addition & 0 deletions llvm/include/llvm/ADT/SmallVector.h
@@ -23,6 +23,7 @@
 #include <cstddef>
 #include <cstdlib>
 #include <cstring>
+#include <functional>
 #include <initializer_list>
 #include <iterator>
 #include <limits>
2 changes: 1 addition & 1 deletion llvm/include/llvm/Analysis/VecFuncs.def
@@ -22,7 +22,7 @@

 #if !(defined(TLI_DEFINE_VECFUNC))
 #define TLI_DEFINE_VECFUNC(SCAL, VEC, VF) {SCAL, VEC, VF},
-#endif 
+#endif

 #if defined(TLI_DEFINE_ACCELERATE_VECFUNCS)
 // Accelerate framework's Vector Functions
2 changes: 1 addition & 1 deletion llvm/include/llvm/Analysis/VectorUtils.h
@@ -31,7 +31,7 @@ enum class VFParamKind {
   OMP_LinearPos,      // declare simd linear(i:c) uniform(c)
   OMP_LinearValPos,   // declare simd linear(val(i:c)) uniform(c)
   OMP_LinearRefPos,   // declare simd linear(ref(i:c)) uniform(c)
-  OMP_LinearUValPos,  // declare simd linear(uval(i:c)) uniform(c
+  OMP_LinearUValPos,  // declare simd linear(uval(i:c)) uniform(c)
   OMP_Uniform,        // declare simd uniform(i)
   GlobalPredicate,    // Global logical predicate that acts on all lanes
                       // of the input and output mask concurrently. For
3 changes: 0 additions & 3 deletions llvm/include/llvm/CodeGen/MachineInstr.h
@@ -1474,9 +1474,6 @@ class MachineInstr
   ///
   /// If GroupNo is not NULL, it will receive the number of the operand group
   /// containing OpIdx.
-  ///
-  /// The flag operand is an immediate that can be decoded with methods like
-  /// InlineAsm::hasRegClassConstraint().
   int findInlineAsmFlagIdx(unsigned OpIdx, unsigned *GroupNo = nullptr) const;

   /// Compute the static register class constraint for operand OpIdx.
4 changes: 2 additions & 2 deletions llvm/include/llvm/IR/IRBuilder.h
@@ -752,15 +752,15 @@ class IRBuilderBase {
   CallInst *CreateInvariantStart(Value *Ptr, ConstantInt *Size = nullptr);

   /// Create a call to Masked Load intrinsic
-  CallInst *CreateMaskedLoad(Value *Ptr, Align Alignment, Value *Mask,
+  CallInst *CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment, Value *Mask,
                              Value *PassThru = nullptr, const Twine &Name = "");

   /// Create a call to Masked Store intrinsic
   CallInst *CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment,
                               Value *Mask);

   /// Create a call to Masked Gather intrinsic
-  CallInst *CreateMaskedGather(Value *Ptrs, Align Alignment,
+  CallInst *CreateMaskedGather(Type *Ty, Value *Ptrs, Align Alignment,
                                Value *Mask = nullptr, Value *PassThru = nullptr,
                                const Twine &Name = "");
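For callers, the practical effect of the new leading Type * parameter is that the loaded or gathered type must now be spelled out rather than recovered from the pointer operand's pointee type. A minimal before/after sketch; Builder, Ptr, Mask, and PassThru are hypothetical values, not names from this diff:

    // Assumed setup: IRBuilder<> Builder; Value *Ptr, *Mask, *PassThru.
    llvm::Type *VecTy = llvm::FixedVectorType::get(Builder.getInt32Ty(), 8);

    // Before: element type inferred from Ptr's pointee type:
    //   Builder.CreateMaskedLoad(Ptr, llvm::Align(32), Mask, PassThru);
    // After: the loaded type is passed explicitly:
    llvm::CallInst *Load =
        Builder.CreateMaskedLoad(VecTy, Ptr, llvm::Align(32), Mask, PassThru);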
2 changes: 1 addition & 1 deletion llvm/include/llvm/IR/Intrinsics.td
@@ -216,7 +216,7 @@ class LLVMVectorOfAnyPointersToElt<int num> : LLVMMatchType<num>;
 class LLVMVectorElementType<int num> : LLVMMatchType<num>;

 // Match the type of another intrinsic parameter that is expected to be a
-// vector type, but change the element count to be half as many
+// vector type, but change the element count to be half as many.
 class LLVMHalfElementsVectorType<int num> : LLVMMatchType<num>;

 // Match the type of another intrinsic parameter that is expected to be a
21 changes: 7 additions & 14 deletions llvm/include/llvm/Support/KnownBits.h
@@ -204,8 +204,13 @@ struct KnownBits {
   /// tracking.
   KnownBits sextInReg(unsigned SrcBitWidth) const;

-  /// Return a KnownBits with the extracted bits
-  /// [bitPosition,bitPosition+numBits).
+  /// Insert the bits from a smaller known bits starting at bitPosition.
+  void insertBits(const KnownBits &SubBits, unsigned BitPosition) {
+    Zero.insertBits(SubBits.Zero, BitPosition);
+    One.insertBits(SubBits.One, BitPosition);
+  }
+
+  /// Return a subset of the known bits from [bitPosition,bitPosition+numBits).
   KnownBits extractBits(unsigned NumBits, unsigned BitPosition) const {
     return KnownBits(Zero.extractBits(NumBits, BitPosition),
                      One.extractBits(NumBits, BitPosition));
@@ -370,18 +375,6 @@ struct KnownBits {
   /// Determine if these known bits always give the same ICMP_SLE result.
   static Optional<bool> sle(const KnownBits &LHS, const KnownBits &RHS);

-  /// Insert the bits from a smaller known bits starting at bitPosition.
-  void insertBits(const KnownBits &SubBits, unsigned BitPosition) {
-    Zero.insertBits(SubBits.Zero, BitPosition);
-    One.insertBits(SubBits.One, BitPosition);
-  }
-
-  /// Return a subset of the known bits from [bitPosition,bitPosition+numBits).
-  KnownBits extractBits(unsigned NumBits, unsigned BitPosition) {
-    return KnownBits(Zero.extractBits(NumBits, BitPosition),
-                     One.extractBits(NumBits, BitPosition));
-  }
-
   /// Update known bits based on ANDing with RHS.
   KnownBits &operator&=(const KnownBits &RHS);

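The relocated insertBits helper applies APInt::insertBits to both the Zero and One masks, so a sub-range round-trips through insertBits/extractBits unchanged. An illustrative usage sketch (not from the commit):

    #include "llvm/Support/KnownBits.h"
    #include <cassert>
    using namespace llvm;

    void knownBitsRoundTrip() {
      KnownBits Wide(32); // nothing known yet about the 32-bit value
      KnownBits Sub(8);
      Sub.Zero.setBit(0); // bit 0 of the sub-range is known to be 0
      Sub.One.setBit(7);  // bit 7 is known to be 1

      Wide.insertBits(Sub, 16); // bits [16,24) of Wide now carry Sub's info
      KnownBits Back = Wide.extractBits(8, 16);
      assert(Back.Zero == Sub.Zero && Back.One == Sub.One); // round-trips
    }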
1 change: 1 addition & 0 deletions llvm/include/llvm/Support/Printable.h
@@ -14,6 +14,7 @@
 #define LLVM_SUPPORT_PRINTABLE_H

 #include <functional>
+#include <utility>

 namespace llvm {

1 change: 0 additions & 1 deletion llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -707,7 +707,6 @@ def assertsext : SDNode<"ISD::AssertSext", SDT_assert>;
 def assertzext : SDNode<"ISD::AssertZext", SDT_assert>;
 def assertalign : SDNode<"ISD::AssertAlign", SDT_assert>;

-
 //===----------------------------------------------------------------------===//
 // Selection DAG Condition Codes
1 change: 1 addition & 0 deletions llvm/lib/Analysis/IVDescriptors.cpp
@@ -646,6 +646,7 @@ bool RecurrenceDescriptor::hasMultipleUsesOf(

   return false;
 }
+
 bool RecurrenceDescriptor::isReductionPHI(PHINode *Phi, Loop *TheLoop,
                                           RecurrenceDescriptor &RedDes,
                                           DemandedBits *DB, AssumptionCache *AC,
1 change: 0 additions & 1 deletion llvm/lib/Analysis/VectorUtils.cpp
@@ -903,7 +903,6 @@ bool llvm::maskIsAllZeroOrUndef(Value *Mask) {
   return true;
 }

-
 bool llvm::maskIsAllOneOrUndef(Value *Mask) {
   assert(isa<VectorType>(Mask->getType()) &&
          isa<IntegerType>(Mask->getType()->getScalarType()) &&
2 changes: 2 additions & 0 deletions llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
@@ -1236,6 +1236,7 @@ void DwarfDebug::beginModule(Module *M) {
       if (!GVMapEntry.size() || (Expr && Expr->isConstant()))
         GVMapEntry.push_back({nullptr, Expr});
     }
+
     DenseSet<DIGlobalVariable *> Processed;
     for (auto *GVE : CUNode->getGlobalVariables()) {
       DIGlobalVariable *GV = GVE->getVariable();
@@ -1553,6 +1554,7 @@ void DwarfDebug::collectVariableInfoFromMFTable(
     RegVar->initializeMMI(VI.Expr, VI.Slot);
     LLVM_DEBUG(dbgs() << "Created DbgVariable for " << VI.Var->getName()
                       << "\n");
+
     if (DbgVariable *DbgVar = MFVars.lookup(Var))
       DbgVar->addMMIEntry(*RegVar);
     else if (InfoHolder.addScopeVariable(Scope, RegVar.get())) {
3 changes: 2 additions & 1 deletion llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -3549,7 +3549,7 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
     }
   }

-  // canonicalize (sub X, (vscale * C)) to (add X,  (vscale * -C))
+  // canonicalize (sub X, (vscale * C)) to (add X, (vscale * -C))
   if (N1.getOpcode() == ISD::VSCALE) {
     const APInt &IntVal = N1.getConstantOperandAPInt(0);
     return DAG.getNode(ISD::ADD, DL, VT, N0, DAG.getVScale(DL, VT, -IntVal));
@@ -12031,6 +12031,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
     AddToWorklist(ExtLoad.getNode());
     return SDValue(N, 0); // Return N so it doesn't get rechecked!
   }
+
   // fold (sext_inreg (zextload x)) -> (sextload x) iff load has one use
   if (ISD::isZEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) &&
       N0.hasOneUse() &&
1 change: 1 addition & 0 deletions llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -2854,6 +2854,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_TruncateHelper(SDNode *N) {
     HalfLo = DAG.getNode(N->getOpcode(), DL, HalfVT, InLoVec);
     HalfHi = DAG.getNode(N->getOpcode(), DL, HalfVT, InHiVec);
   }
+
   // Concatenate them to get the full intermediate truncation result.
   EVT InterVT = EVT::getVectorVT(*DAG.getContext(), HalfElementVT, NumElements);
   SDValue InterVec = DAG.getNode(ISD::CONCAT_VECTORS, DL, InterVT, HalfLo,
36 changes: 18 additions & 18 deletions llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -1397,7 +1397,7 @@ SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
       SmallVector<SDValue, 2> ScalarParts;
       for (unsigned i = 0; i != Parts; ++i)
         ScalarParts.push_back(getConstant(
-            NewVal.lshr(i * ViaEltSizeInBits).trunc(ViaEltSizeInBits), DL,
+            NewVal.extractBits(ViaEltSizeInBits, i * ViaEltSizeInBits), DL,
             ViaEltVT, isT, isO));

       return getNode(ISD::SPLAT_VECTOR_PARTS, DL, VT, ScalarParts);
@@ -1412,11 +1412,10 @@ SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
     assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());

     SmallVector<SDValue, 2> EltParts;
-    for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
+    for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i)
       EltParts.push_back(getConstant(
-          NewVal.lshr(i * ViaEltSizeInBits).zextOrTrunc(ViaEltSizeInBits), DL,
+          NewVal.extractBits(ViaEltSizeInBits, i * ViaEltSizeInBits), DL,
           ViaEltVT, isT, isO));
-    }

     // EltParts is currently in little endian order. If we actually want
     // big-endian order then reverse it now.
@@ -1770,7 +1769,7 @@ static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) {
 SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
                                        SDValue N2, ArrayRef<int> Mask) {
   assert(VT.getVectorNumElements() == Mask.size() &&
-           "Must have the same number of vector elements as mask elements!");
+         "Must have the same number of vector elements as mask elements!");
   assert(VT == N1.getValueType() && VT == N2.getValueType() &&
          "Invalid VECTOR_SHUFFLE");
@@ -2858,8 +2857,8 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
     unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
     unsigned NumSubVectors = Op.getNumOperands();
     for (unsigned i = 0; i != NumSubVectors; ++i) {
-      APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
-      DemandedSub = DemandedSub.trunc(NumSubVectorElts);
+      APInt DemandedSub =
+          DemandedElts.extractBits(NumSubVectorElts, i * NumSubVectorElts);
       if (!!DemandedSub) {
         SDValue Sub = Op.getOperand(i);
         Known2 = computeKnownBits(Sub, DemandedSub, Depth + 1);
@@ -2955,8 +2954,8 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
         Known2 = computeKnownBits(N0, SubDemandedElts.shl(i),
                                   Depth + 1);
         unsigned Shifts = IsLE ? i : SubScale - 1 - i;
-        Known.One.insertBits(Known2.One, SubBitWidth * Shifts);
-        Known.Zero.insertBits(Known2.Zero, SubBitWidth * Shifts);
+        Known.insertBits(Known2, SubBitWidth * Shifts);
       }
     }

@@ -2980,8 +2978,8 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
       if (DemandedElts[i]) {
         unsigned Shifts = IsLE ? i : NumElts - 1 - i;
         unsigned Offset = (Shifts % SubScale) * BitWidth;
-        Known.One &= Known2.One.lshr(Offset).trunc(BitWidth);
-        Known.Zero &= Known2.Zero.lshr(Offset).trunc(BitWidth);
+        Known = KnownBits::commonBits(Known,
+                                      Known2.extractBits(BitWidth, Offset));
         // If we don't know any bits, early out.
         if (Known.isUnknown())
           break;
@@ -4110,8 +4108,8 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
     unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
     unsigned NumSubVectors = Op.getNumOperands();
     for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) {
-      APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
-      DemandedSub = DemandedSub.trunc(NumSubVectorElts);
+      APInt DemandedSub =
+          DemandedElts.extractBits(NumSubVectorElts, i * NumSubVectorElts);
       if (!DemandedSub)
         continue;
       Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1);
@@ -5695,6 +5693,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
       const APInt &Val = N1C->getAPIntValue();
       return SignExtendInReg(Val, VT);
     }
+
     if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) {
       SmallVector<SDValue, 8> Ops;
       llvm::EVT OpVT = N1.getOperand(0).getValueType();
@@ -5832,7 +5831,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
       return getConstant(Val.extractBits(ElementSize, Shift), DL, VT);
     }
     break;
-  case ISD::EXTRACT_SUBVECTOR:
+  case ISD::EXTRACT_SUBVECTOR: {
     EVT N1VT = N1.getValueType();
     assert(VT.isVector() && N1VT.isVector() &&
            "Extract subvector VTs must be vectors!");
@@ -5875,6 +5874,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
       return N1.getOperand(1);
     break;
   }
+  }

   // Perform trivial constant folding.
   if (SDValue SV = FoldConstantArithmetic(Opcode, DL, VT, {N1, N2}))
@@ -10264,10 +10264,10 @@ bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
   // FIXME: This does not work for vectors with elements less than 8 bits.
   while (VecWidth > 8) {
     unsigned HalfSize = VecWidth / 2;
-    APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
-    APInt LowValue = SplatValue.trunc(HalfSize);
-    APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
-    APInt LowUndef = SplatUndef.trunc(HalfSize);
+    APInt HighValue = SplatValue.extractBits(HalfSize, HalfSize);
+    APInt LowValue = SplatValue.extractBits(HalfSize, 0);
+    APInt HighUndef = SplatUndef.extractBits(HalfSize, HalfSize);
+    APInt LowUndef = SplatUndef.extractBits(HalfSize, 0);

     // If the two halves do not match (ignoring undef bits), stop here.
     if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
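The recurring rewrite in this file swaps lshr+trunc chains (or a bare trunc for the low half) for APInt::extractBits, which states the width and start bit directly. A standalone equivalence check, assuming only APInt's public API:

    #include "llvm/ADT/APInt.h"
    #include <cassert>
    using namespace llvm;

    void extractBitsEquivalence() {
      APInt Val(64, 0x1122334455667788ULL);
      // Old idiom: shift the slice down to bit 0, then truncate to its width.
      APInt ViaShift = Val.lshr(24).trunc(16);
      // New idiom: one call naming the width (16) and the start bit (24).
      APInt ViaExtract = Val.extractBits(16, 24);
      assert(ViaShift == ViaExtract); // both yield bits [24,40)
    }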
2 changes: 1 addition & 1 deletion llvm/lib/CodeGen/StackSlotColoring.cpp
@@ -76,7 +76,7 @@ namespace {
     // OrigAlignments - Alignments of stack objects before coloring.
     SmallVector<Align, 16> OrigAlignments;

-    // OrigSizes - Sizess of stack objects before coloring.
+    // OrigSizes - Sizes of stack objects before coloring.
     SmallVector<unsigned, 16> OrigSizes;

     // AllColors - If index is set, it's a spill slot, i.e. color.
5 changes: 2 additions & 3 deletions llvm/lib/IR/AutoUpgrade.cpp
@@ -1421,10 +1421,9 @@ static Value *UpgradeMaskedLoad(IRBuilder<> &Builder,
     return Builder.CreateAlignedLoad(ValTy, Ptr, Alignment);

   // Convert the mask from an integer type to a vector of i1.
-  unsigned NumElts =
-      cast<FixedVectorType>(Passthru->getType())->getNumElements();
+  unsigned NumElts = cast<FixedVectorType>(ValTy)->getNumElements();
   Mask = getX86MaskVec(Builder, Mask, NumElts);
-  return Builder.CreateMaskedLoad(Ptr, Alignment, Mask, Passthru);
+  return Builder.CreateMaskedLoad(ValTy, Ptr, Alignment, Mask, Passthru);
 }

 static Value *upgradeAbs(IRBuilder<> &Builder, CallInst &CI) {
1 change: 1 addition & 0 deletions llvm/lib/IR/Constants.cpp
@@ -442,6 +442,7 @@ Constant *Constant::getAggregateElement(unsigned Elt) const {
   if (const auto *CDS = dyn_cast<ConstantDataSequential>(this))
     return Elt < CDS->getNumElements() ? CDS->getElementAsConstant(Elt)
                                        : nullptr;
+
   return nullptr;
 }
(Diffs for the remaining 16 of the 38 changed files did not load and are not shown.)
