diff -u -r -N llvm-3.3.src/lib/Support/Host.cpp llvm-3.3/lib/Support/Host.cpp
--- llvm-3.3.src/lib/Support/Host.cpp	2013-05-04 02:36:23.000000000 -0500
+++ llvm-3.3/lib/Support/Host.cpp	2014-06-05 15:08:41.975312974 -0500
@@ -52,8 +52,54 @@
 
 /// GetX86CpuIDAndInfo - Execute the specified cpuid and return the 4 values in the
 /// specified arguments.  If we can't run cpuid on the host, return true.
-static bool GetX86CpuIDAndInfo(unsigned value, unsigned *rEAX,
-                            unsigned *rEBX, unsigned *rECX, unsigned *rEDX) {
+static bool GetX86CpuIDAndInfo(unsigned value, unsigned *rEAX, unsigned *rEBX,
+                               unsigned *rECX, unsigned *rEDX) {
+#if defined(__GNUC__) || defined(__clang__)
+  #if defined(__x86_64__) || defined(_M_AMD64) || defined (_M_X64)
+    // gcc doesn't know cpuid would clobber ebx/rbx. Preserve it manually.
+    asm ("movq\t%%rbx, %%rsi\n\t"
+         "cpuid\n\t"
+         "xchgq\t%%rbx, %%rsi\n\t"
+         : "=a" (*rEAX),
+           "=S" (*rEBX),
+           "=c" (*rECX),
+           "=d" (*rEDX)
+         :  "a" (value));
+    return false;
+  #elif defined(i386) || defined(__i386__) || defined(__x86__) || defined(_M_IX86)
+    asm ("movl\t%%ebx, %%esi\n\t"
+         "cpuid\n\t"
+         "xchgl\t%%ebx, %%esi\n\t"
+         : "=a" (*rEAX),
+           "=S" (*rEBX),
+           "=c" (*rECX),
+           "=d" (*rEDX)
+         :  "a" (value));
+    return false;
+// pedantic #else returns to appease -Wunreachable-code (so we don't generate
+// postprocessed code that looks like "return true; return false;")
+  #else
+    return true;
+  #endif
+#elif defined(_MSC_VER)
+  // The MSVC intrinsic is portable across x86 and x64.
+  int registers[4];
+  __cpuid(registers, value);
+  *rEAX = registers[0];
+  *rEBX = registers[1];
+  *rECX = registers[2];
+  *rEDX = registers[3];
+  return false;
+#else
+  return true;
+#endif
+}
+
+/// GetX86CpuIDAndInfoEx - Execute the specified cpuid leaf with the given
+/// subleaf and return the 4 values in the specified arguments.  If we can't
+/// run cpuid on the host, return true.
+bool GetX86CpuIDAndInfoEx(unsigned value, unsigned subleaf, unsigned *rEAX,
+                          unsigned *rEBX, unsigned *rECX, unsigned *rEDX) {
 #if defined(__x86_64__) || defined(_M_AMD64) || defined (_M_X64)
   #if defined(__GNUC__)
     // gcc doesn't know cpuid would clobber ebx/rbx. Preseve it manually.
@@ -64,16 +110,22 @@
            "=S" (*rEBX),
            "=c" (*rECX),
            "=d" (*rEDX)
-         :  "a" (value));
+         :  "a" (value),
+            "c" (subleaf));
     return false;
   #elif defined(_MSC_VER)
-    int registers[4];
-    __cpuid(registers, value);
-    *rEAX = registers[0];
-    *rEBX = registers[1];
-    *rECX = registers[2];
-    *rEDX = registers[3];
-    return false;
+    // __cpuidex was added in MSVC++ 9.0 SP1
+    #if (_MSC_VER > 1500) || (_MSC_VER == 1500 && _MSC_FULL_VER >= 150030729)
+      int registers[4];
+      __cpuidex(registers, value, subleaf);
+      *rEAX = registers[0];
+      *rEBX = registers[1];
+      *rECX = registers[2];
+      *rEDX = registers[3];
+      return false;
+    #else
+      return true;
+    #endif
   #else
     return true;
   #endif
@@ -86,11 +138,13 @@
            "=S" (*rEBX),
            "=c" (*rECX),
            "=d" (*rEDX)
-         :  "a" (value));
+         :  "a" (value),
+            "c" (subleaf));
     return false;
   #elif defined(_MSC_VER)
     __asm {
       mov   eax,value
+      mov   ecx,subleaf
       cpuid
       mov   esi,rEAX
       mov   dword ptr [esi],eax
@@ -102,8 +156,6 @@
       mov   dword ptr [esi],edx
     }
     return false;
-// pedantic #else returns to appease -Wunreachable-code (so we don't generate
-// postprocessed code that looks like "return true; return false;")
   #else
     return true;
   #endif
@@ -148,21 +200,27 @@
   unsigned Model  = 0;
   DetectX86FamilyModel(EAX, Family, Model);
 
+  union {
+    unsigned u[3];
+    char     c[12];
+  } text;
+
+  GetX86CpuIDAndInfo(0, &EAX, text.u+0, text.u+2, text.u+1);
+
+  unsigned MaxLeaf = EAX;
   bool HasSSE3 = (ECX & 0x1);
+  bool HasSSE41 = (ECX & 0x80000);
   // If CPUID indicates support for XSAVE, XRESTORE and AVX, and XGETBV 
   // indicates that the AVX registers will be saved and restored on context
   // switch, then we have full AVX support.
   const unsigned AVXBits = (1 << 27) | (1 << 28);
   bool HasAVX = ((ECX & AVXBits) == AVXBits) && OSHasAVXSupport();
+  bool HasAVX2 = HasAVX && MaxLeaf >= 0x7 &&
+                 !GetX86CpuIDAndInfoEx(0x7, 0x0, &EAX, &EBX, &ECX, &EDX) &&
+                 (EBX & 0x20);
   GetX86CpuIDAndInfo(0x80000001, &EAX, &EBX, &ECX, &EDX);
   bool Em64T = (EDX >> 29) & 0x1;
 
-  union {
-    unsigned u[3];
-    char     c[12];
-  } text;
-
-  GetX86CpuIDAndInfo(0, &EAX, text.u+0, text.u+2, text.u+1);
   if (memcmp(text.c, "GenuineIntel", 12) == 0) {
     switch (Family) {
     case 3:
@@ -244,7 +302,8 @@
                // 17h. All processors are manufactured using the 45 nm process.
                //
                // 45nm: Penryn , Wolfdale, Yorkfield (XE)
-        return "penryn";
+        // Not all Penryn processors support SSE 4.1 (such as the Pentium brand)
+        return HasSSE41 ? "penryn" : "core2";
 
       case 26: // Intel Core i7 processor and Intel Xeon processor. All
                // processors are manufactured using the 45 nm process.
@@ -269,10 +328,20 @@
 
       // Ivy Bridge:
       case 58:
+      case 62: // Ivy Bridge EP
         // Not all Ivy Bridge processors support AVX (such as the Pentium
         // versions instead of the i7 versions).
         return HasAVX ? "core-avx-i" : "corei7";
 
+      // Haswell:
+      case 60:
+      case 63:
+      case 69:
+      case 70:
+        // Not all Haswell processors support AVX2 (such as the Pentium
+        // versions instead of the i7 versions).
+        return HasAVX2 ? "core-avx2" : "corei7";
+
       case 28: // Most 45 nm Intel Atom processors
       case 38: // 45 nm Atom Lincroft
       case 39: // 32 nm Atom Medfield
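Note: the Host.cpp hunks above do two things. They add GetX86CpuIDAndInfoEx, a
cpuid helper that also loads ECX with a subleaf (needed because feature leaf 7
is subleaf-indexed), and they use it to gate CPU names on features that are
actually present: "penryn" now requires SSE4.1, and the new Haswell cases
require AVX2 (CPUID leaf 7, subleaf 0, EBX bit 5, on top of the existing
XGETBV-based OSHasAVXSupport check). A minimal standalone sketch of the same
leaf-7 query follows; it uses the <cpuid.h> helpers available in newer
GCC/Clang rather than the raw inline assembly, which is an assumption about
the host toolchain that this patch itself cannot make for the compilers LLVM
3.3 supported:

#include <cpuid.h>
#include <cstdio>

int main() {
  unsigned eax, ebx, ecx, edx;
  // Leaf 0 reports the highest supported standard leaf in EAX, mirroring the
  // MaxLeaf check added above.
  if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx) || eax < 7) {
    std::puts("CPUID leaf 7 not supported");
    return 1;
  }
  // Leaf 7, subleaf 0: AVX2 is EBX bit 5. (The patch additionally requires
  // the AVX/XGETBV OS-support check before trusting this bit.)
  __get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx);
  std::printf("AVX2: %s\n", (ebx & (1u << 5)) ? "yes" : "no");
  return 0;
}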
diff -u -r -N llvm-3.3.src/lib/Transforms/Vectorize/SLPVectorizer.cpp llvm-3.3/lib/Transforms/Vectorize/SLPVectorizer.cpp
--- llvm-3.3.src/lib/Transforms/Vectorize/SLPVectorizer.cpp	2013-04-30 16:04:51.000000000 -0500
+++ llvm-3.3/lib/Transforms/Vectorize/SLPVectorizer.cpp	2014-04-15 16:26:12.200527154 -0500
@@ -19,6 +19,7 @@
 #define DEBUG_TYPE SV_NAME
 
 #include "VecUtils.h"
+#include "llvm/ADT/SmallSet.h"
 #include "llvm/Transforms/Vectorize.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/ScalarEvolution.h"
@@ -128,7 +129,7 @@
   bool tryToVectorizePair(Value *A, Value *B,  BoUpSLP &R);
 
   /// \brief Try to vectorize a list of operands.
-  bool tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R);
+  bool tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R, InsertElementInst *IE = 0);
 
   /// \brief Try to vectorize a chain that may start at the operands of \V;
   bool tryToVectorize(BinaryOperator *V,  BoUpSLP &R);
@@ -179,7 +180,7 @@
   return tryToVectorizeList(VL, R);
 }
 
-bool SLPVectorizer::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R) {
+bool SLPVectorizer::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R, InsertElementInst *IE) {
   DEBUG(dbgs()<<"SLP: Vectorizing a list of length = " << VL.size() << ".\n");
 
   // Check that all of the parts are scalar.
@@ -191,10 +192,12 @@
 
   int Cost = R.getTreeCost(VL);
   int ExtrCost = R.getScalarizationCost(VL);
-  DEBUG(dbgs()<<"SLP: Cost of pair:" << Cost <<
+  DEBUG(dbgs()<<"SLP: Cost of list:" << Cost <<
         " Cost of extract:" << ExtrCost << ".\n");
   if ((Cost+ExtrCost) >= -SLPCostThreshold) return false;
-  DEBUG(dbgs()<<"SLP: Vectorizing pair.\n");
+  DEBUG(dbgs()<<"SLP: Vectorizing list.\n");
+  if (IE)
+    R.movePrematureInserts(VL,IE);
   R.vectorizeArith(VL);
   return true;
 }
@@ -237,9 +240,49 @@
   return 0;
 }
 
+/// \brief Recognize construction of vectors like
+///  %ra = insertelement <4 x float> undef, float %s0, i32 0
+///  %rb = insertelement <4 x float> %ra, float %s1, i32 1
+///  %rc = insertelement <4 x float> %rb, float %s2, i32 2
+///  %rd = insertelement <4 x float> %rc, float %s3, i32 3
+///
+/// Returns true if it matches.  Sets \p Ops to the values inserted
+/// by the chain of insertelement instructions.
+///
+static bool findBuildVector(InsertElementInst *IE,
+                            SmallVectorImpl<Value *> &Ops) {
+  if (!isa<UndefValue>(IE->getOperand(0)))
+    return false;
+
+  while (true) {
+    Ops.push_back(IE->getOperand(1));
+
+    if (IE->use_empty())
+      return false;
+
+    InsertElementInst *NextUse = dyn_cast<InsertElementInst>(IE->use_back());
+    if (!NextUse)
+      return true;
+
+    // If this isn't the final use, make sure the next insertelement is the only
+    // use. It's OK if the final constructed vector is used multiple times.
+    if (!IE->hasOneUse())
+      return false;
+
+    IE = NextUse;
+  }
+
+  return false;
+}
+
 bool SLPVectorizer::vectorizeReductions(BasicBlock *BB, BoUpSLP &R) {
   bool Changed = false;
+  SmallSet<Value *, 16> VisitedInstrs;
   for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
+    // We may go through BB multiple times so skip instructions we have already visited.
+    if (!VisitedInstrs.insert(it))
+      continue;
+
     if (isa<DbgInfoIntrinsic>(it)) continue;
 
     // Try to vectorize reductions that use PHINodes.
@@ -271,6 +314,21 @@
           Changed |= tryToVectorizePair(BI->getOperand(0), BI->getOperand(1), R);
       continue;
     }
+
+    // Try to vectorize trees that start at insertelement instructions.
+    if (InsertElementInst *IE = dyn_cast<InsertElementInst>(it)) {
+      SmallVector<Value *, 8> Ops;
+      if (!findBuildVector(IE, Ops))
+        continue;
+
+      if (tryToVectorizeList(Ops, R, IE)) {
+        Changed = true;
+        it = BB->begin();
+        e = BB->end();
+      }
+
+      continue;
+    }
   }
 
   return Changed;
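Note: these SLPVectorizer.cpp hunks teach the bottom-up SLP pass to recognize
"build vector" chains: findBuildVector starts from an insertelement into
undef, follows the single-use chain of inserts, and collects the inserted
scalars into Ops; tryToVectorizeList then vectorizes those scalars as one
tree. After a successful vectorization the scan restarts from the top of the
block, which is why vectorizeReductions now carries a VisitedInstrs set. A
hedged sketch of source code that lowers to this shape, assuming Clang/GCC
vector extensions and that the per-lane form reaches IR intact (the actual
trigger was similar lane-wise code generated by Julia):

// Per-lane arithmetic on a vector type; each statement becomes two
// extractelements, an fadd, and an insertelement, i.e. the pattern in the
// take_credit test added later in this patch.
typedef float v4sf __attribute__((vector_size(16)));

v4sf add_lanes(v4sf a, v4sf b) {
  v4sf r;
  r[0] = a[0] + b[0];
  r[1] = a[1] + b[1];
  r[2] = a[2] + b[2];
  r[3] = a[3] + b[3];
  // With this patch, SLP can rewrite the four scalar fadds as one
  // fadd <4 x float>.
  return r;
}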
diff -u -r -N llvm-3.3.src/lib/Transforms/Vectorize/VecUtils.cpp llvm-3.3/lib/Transforms/Vectorize/VecUtils.cpp
--- llvm-3.3.src/lib/Transforms/Vectorize/VecUtils.cpp	2013-04-21 03:05:59.000000000 -0500
+++ llvm-3.3/lib/Transforms/Vectorize/VecUtils.cpp	2014-04-15 17:44:02.710904964 -0500
@@ -243,7 +243,8 @@
   LaneMap.clear();
   MultiUserVals.clear();
   MustScalarize.clear();
-
+  AllScalarized = true;
+
   // Scan the tree and find which value is used by which lane, and which values
   // must be scalarized.
   getTreeUses_rec(VL, 0);
@@ -275,7 +276,8 @@
   }
 
   // Now calculate the cost of vectorizing the tree.
-  return getTreeCost_rec(VL, 0);
+  int treeCost = getTreeCost_rec(VL, 0);
+  return AllScalarized ? max_cost : treeCost;
 }
 
 void BoUpSLP::getTreeUses_rec(ArrayRef<Value *> VL, unsigned Depth) {
@@ -380,6 +382,55 @@
   }
 }
 
+#ifndef NDEBUG
+/// \returns The opcode if all of the Instructions in \p VL have the same
+/// opcode, or zero.
+static unsigned getSameOpcode(ArrayRef<Value *> VL) {
+  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
+  if (!I0)
+    return 0;
+  unsigned Opcode = I0->getOpcode();
+  for (int i = 1, e = VL.size(); i < e; i++) {
+    Instruction *I = dyn_cast<Instruction>(VL[i]);
+    if (!I || Opcode != I->getOpcode())
+      return 0;
+  }
+  return Opcode;
+}
+#endif /*NDEBUG*/
+
+/// \returns True if the ExtractElement instructions in VL can be vectorized
+/// to use the original vector.
+static bool CanReuseExtract(ArrayRef<Value *> VL) {
+  assert(Instruction::ExtractElement == getSameOpcode(VL) && "Invalid opcode");
+  // Check if all of the extracts come from the same vector and from the
+  // correct offset.
+  Value *VL0 = VL[0];
+  ExtractElementInst *E0 = cast<ExtractElementInst>(VL0);
+  Value *Vec = E0->getOperand(0);
+
+  // We have to extract from the same vector type.
+  unsigned NElts = Vec->getType()->getVectorNumElements();
+
+  if (NElts != VL.size())
+    return false;
+
+  // Check that all of the indices extract from the correct offset.
+  ConstantInt *CI = dyn_cast<ConstantInt>(E0->getOperand(1));
+  if (!CI || CI->getZExtValue())
+    return false;
+
+  for (unsigned i = 1, e = VL.size(); i < e; ++i) {
+    ExtractElementInst *E = cast<ExtractElementInst>(VL[i]);
+    ConstantInt *CI = dyn_cast<ConstantInt>(E->getOperand(1));
+
+    if (!CI || CI->getZExtValue() != i || E->getOperand(0) != Vec)
+      return false;
+  }
+
+  return true;
+}
+
 int BoUpSLP::getTreeCost_rec(ArrayRef<Value *> VL, unsigned Depth) {
   Type *ScalarTy = VL[0]->getType();
 
@@ -457,6 +508,20 @@
   }
 
   switch (Opcode) {
+  case Instruction::ExtractElement: { 
+    if (CanReuseExtract(VL)) {
+      int DeadCost = 0;
+      for (unsigned i = 0, e = VL.size(); i < e; ++i) {
+        ExtractElementInst *E = cast<ExtractElementInst>(VL[i]);
+        if (E->hasOneUse())
+          // Take credit for instruction that will become dead.
+          DeadCost += TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, i);
+      }
+      DEBUG(dbgs() << "SLP: taking credit of " << -DeadCost << "\n");
+      return -DeadCost;
+    }
+    return getScalarizationCost(VecTy);
+  }
   case Instruction::ZExt:
   case Instruction::SExt:
   case Instruction::FPToUI:
@@ -490,6 +555,7 @@
     VectorType *SrcVecTy = VectorType::get(SrcTy, VL.size());
     int VecCost = TTI->getCastInstrCost(VL0->getOpcode(), VecTy, SrcVecTy);
     Cost += (VecCost - ScalarCost);
+    AllScalarized = false;
     return Cost;
   }
   case Instruction::Add:
@@ -528,6 +594,7 @@
 
     int VecCost = TTI->getArithmeticInstrCost(Opcode, VecTy);
     Cost += (VecCost - ScalarCost);
+    AllScalarized = false;
     return Cost;
   }
   case Instruction::Load: {
@@ -540,6 +607,7 @@
     int ScalarLdCost = VecTy->getNumElements() *
       TTI->getMemoryOpCost(Instruction::Load, ScalarTy, 1, 0);
     int VecLdCost = TTI->getMemoryOpCost(Instruction::Load, ScalarTy, 1, 0);
+    AllScalarized = false;
     return VecLdCost - ScalarLdCost;
   }
   case Instruction::Store: {
@@ -556,6 +624,7 @@
     }
 
     int TotalCost = StoreCost + getTreeCost_rec(Operands, Depth + 1);
+    AllScalarized = false;
     return TotalCost;
   }
   default:
@@ -630,6 +699,16 @@
   }
 
   switch (Opcode) {
+  case Instruction::ExtractElement: {
+    Value *S;
+    if (CanReuseExtract(VL)) {
+      S = VL0->getOperand(0);
+    } else {
+      S = Scalarize(VL, VecTy);
+    }
+    VectorizedValues[VL0] = S;
+    return S;
+  }
   case Instruction::ZExt:
   case Instruction::SExt:
   case Instruction::FPToUI:
@@ -727,4 +806,32 @@
   }
 }
 
+void BoUpSLP::movePrematureInserts(ArrayRef<Value *> VL, InsertElementInst *IE) {
+  Instruction *VL0 = cast<Instruction>(VL[0]);
+  int MyLastIndex = InstrIdx[VL0];
+  for (unsigned i = 1, e = VL.size(); i < e; ++i )
+    MyLastIndex = std::max(MyLastIndex, InstrIdx[VL[i]]);
+  BasicBlock *BB = cast<Instruction>(VL0)->getParent();
+  bool moved = false;
+  DEBUG(dbgs() << "SLP: Moving premature inserts\n");
+  Instruction* x = InstrVec[MyLastIndex];
+  while (IE->getParent()==BB) {
+    int UserIndex = InstrIdx[IE];
+    if (UserIndex >= MyLastIndex) {
+      // Walked past transformed region
+      break;
+    }
+    IE->removeFromParent();
+    IE->insertAfter(x);
+    DEBUG(dbgs() << "SLP:    Rescheduled: " << *IE << ".\n");
+    moved = true;  
+    x = IE;
+    IE = dyn_cast<InsertElementInst>(IE->use_back());
+    if (!IE) 
+      break;
+  } 
+  if (moved) 
+    numberInstructions(); 
+}
+
 } // end of namespace
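Note: movePrematureInserts handles the case exercised by the
reschedule_extract test below. The insertelement chain may be interleaved
with the scalar computations, so before the scalars are replaced by vector
code, inserts whose instruction index precedes the last scalar of the tree
are moved after it, and the block is renumbered. The AllScalarized flag added
alongside it makes getTreeCost return max_cost whenever no node in the tree
was actually vectorized, so pure scalarization is never reported as
profitable. A toy model of the rescheduling step over a plain list; the names
(Instr, reschedulePrematureInserts) are hypothetical, for illustration only,
not the LLVM types:

#include <cstdio>
#include <iterator>
#include <list>
#include <string>

// Toy model: "inserts" that occur before the last scalar op of the tree are
// spliced to just after it, preserving their relative order, so the chain
// ends up as one contiguous block that vector code can replace.
struct Instr { std::string name; bool isInsert; };

void reschedulePrematureInserts(std::list<Instr> &bb,
                                std::list<Instr>::iterator lastScalar) {
  auto pos = std::next(lastScalar);  // insertion point: after the last scalar
  for (auto it = bb.begin(); it != lastScalar;) {
    auto cur = it++;                 // advance first: splice moves cur
    if (cur->isInsert)
      bb.splice(pos, bb, cur);
  }
}

int main() {
  // c0 v0 c1 v1 c2 models reschedule_extract: scalar fadds (cN) interleaved
  // with the insertelement chain (vN).
  std::list<Instr> bb = {{"c0", false}, {"v0", true}, {"c1", false},
                         {"v1", true},  {"c2", false}};
  reschedulePrematureInserts(bb, std::prev(bb.end()));
  for (const Instr &i : bb) std::printf("%s ", i.name.c_str());
  std::printf("\n");                 // prints: c0 c1 c2 v0 v1
}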
diff -u -r -N llvm-3.3.src/lib/Transforms/Vectorize/VecUtils.h llvm-3.3/lib/Transforms/Vectorize/VecUtils.h
--- llvm-3.3.src/lib/Transforms/Vectorize/VecUtils.h	2013-04-20 04:49:10.000000000 -0500
+++ llvm-3.3/lib/Transforms/Vectorize/VecUtils.h	2014-04-15 17:38:41.549776444 -0500
@@ -77,6 +77,9 @@
   /// sequences.
   ValueList &getGatherSeqInstructions() {return GatherInstructions; }
 
+  /// \brief Move InsertElement instructions that precede the last value in
+  /// \p VL to after it. \p IE is the root of a chain found by findBuildVector.
+  void movePrematureInserts(ArrayRef<Value *> VL, InsertElementInst *IE);
 private:
   /// \brief This method contains the recursive part of getTreeCost.
   int getTreeCost_rec(ArrayRef<Value *> VL, unsigned Depth);
@@ -126,6 +129,8 @@
   /// by multiple lanes, or by users outside the tree.
   /// NOTICE: The vectorization methods also use this set.
   ValueSet MustScalarize;
+  /// True if all values must be scalarized, i.e. vectorization is worthless.
+  bool AllScalarized;
 
   /// Contains a list of values that are used outside the current tree. This
   /// set must be reset between runs.
diff -u -r -N llvm-3.3.src/test/Transforms/SLPVectorizer/X86/insert-element-build-vector.ll llvm-3.3/test/Transforms/SLPVectorizer/X86/insert-element-build-vector.ll
--- llvm-3.3.src/test/Transforms/SLPVectorizer/X86/insert-element-build-vector.ll	1969-12-31 18:00:00.000000000 -0600
+++ llvm-3.3/test/Transforms/SLPVectorizer/X86/insert-element-build-vector.ll	2014-04-15 14:13:12.738690168 -0500
@@ -0,0 +1,53 @@
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-n8:16:32:64-S128"
+
+target triple = "x86_64-apple-macosx10.8.0"
+
+; RUN: opt -S -slp-vectorizer -slp-threshold=0 < %s | FileCheck %s 
+
+; Check that cost model for vectorization takes credit for
+; instructions that are erased.
+define <4 x float> @take_credit(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: @take_credit(
+; CHECK: %1 = fadd <4 x float> %a, %b
+  %a0 = extractelement <4 x float> %a, i32 0
+  %b0 = extractelement <4 x float> %b, i32 0
+  %c0 = fadd float %a0, %b0
+  %a1 = extractelement <4 x float> %a, i32 1
+  %b1 = extractelement <4 x float> %b, i32 1
+  %c1 = fadd float %a1, %b1
+  %a2 = extractelement <4 x float> %a, i32 2
+  %b2 = extractelement <4 x float> %b, i32 2
+  %c2 = fadd float %a2, %b2
+  %a3 = extractelement <4 x float> %a, i32 3
+  %b3 = extractelement <4 x float> %b, i32 3
+  %c3 = fadd float %a3, %b3
+  %v0 = insertelement <4 x float> undef, float %c0, i32 0
+  %v1 = insertelement <4 x float> %v0, float %c1, i32 1
+  %v2 = insertelement <4 x float> %v1, float %c2, i32 2
+  %v3 = insertelement <4 x float> %v2, float %c3, i32 3
+  ret <4 x float> %v3
+}
+
+; Make sure that vectorization happens even if extractelement operations
+; must be rescheduled.  The case here is from compiling Julia.
+define <4 x float> @reschedule_extract(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: @reschedule_extract(
+; CHECK: %1 = fadd <4 x float> %a, %b
+  %a0 = extractelement <4 x float> %a, i32 0
+  %b0 = extractelement <4 x float> %b, i32 0
+  %c0 = fadd float %a0, %b0
+  %v0 = insertelement <4 x float> undef, float %c0, i32 0
+  %a1 = extractelement <4 x float> %a, i32 1
+  %b1 = extractelement <4 x float> %b, i32 1
+  %c1 = fadd float %a1, %b1
+  %v1 = insertelement <4 x float> %v0, float %c1, i32 1
+  %a2 = extractelement <4 x float> %a, i32 2
+  %b2 = extractelement <4 x float> %b, i32 2
+  %c2 = fadd float %a2, %b2
+  %v2 = insertelement <4 x float> %v1, float %c2, i32 2
+  %a3 = extractelement <4 x float> %a, i32 3
+  %b3 = extractelement <4 x float> %b, i32 3
+  %c3 = fadd float %a3, %b3
+  %v3 = insertelement <4 x float> %v2, float %c3, i32 3
+  ret <4 x float> %v3
+}
diff -u -r -N llvm-3.3.src/test/Transforms/SLPVectorizer/X86/non-vectorizable-intrinsic.ll llvm-3.3/test/Transforms/SLPVectorizer/X86/non-vectorizable-intrinsic.ll
--- llvm-3.3.src/test/Transforms/SLPVectorizer/X86/non-vectorizable-intrinsic.ll	1969-12-31 18:00:00.000000000 -0600
+++ llvm-3.3/test/Transforms/SLPVectorizer/X86/non-vectorizable-intrinsic.ll	2014-04-15 15:42:30.448560440 -0500
@@ -0,0 +1,36 @@
+; RUN: opt < %s -slp-vectorizer -o - -S -slp-threshold=-1000
+
+target datalayout = "e-p:32:32-i64:64-v16:16-v32:32-n16:32:64"
+target triple = "nvptx--nvidiacl"
+
+; CTLZ cannot be vectorized currently because the second argument is a scalar
+; for both the scalar and vector forms of the intrinsic. In the future it
+; should be possible to vectorize such functions.
+; Test causes an assert if LLVM tries to vectorize CTLZ.
+
+define <2 x i8> @cltz_test(<2 x i8> %x) #0 {
+entry:
+  %0 = extractelement <2 x i8> %x, i32 0
+  %call.i = call i8 @llvm.ctlz.i8(i8 %0, i1 false)
+  %vecinit = insertelement <2 x i8> undef, i8 %call.i, i32 0
+  %1 = extractelement <2 x i8> %x, i32 1
+  %call.i4 = call i8 @llvm.ctlz.i8(i8 %1, i1 false)
+  %vecinit2 = insertelement <2 x i8> %vecinit, i8 %call.i4, i32 1
+  ret <2 x i8> %vecinit2
+}
+
+define <2 x i8> @cltz_test2(<2 x i8> %x) #1 {
+entry:
+  %0 = extractelement <2 x i8> %x, i32 0
+  %1 = extractelement <2 x i8> %x, i32 1
+  %call.i = call i8 @llvm.ctlz.i8(i8 %0, i1 false)
+  %call.i4 = call i8 @llvm.ctlz.i8(i8 %1, i1 false)
+  %vecinit = insertelement <2 x i8> undef, i8 %call.i, i32 0
+  %vecinit2 = insertelement <2 x i8> %vecinit, i8 %call.i4, i32 1
+  ret <2 x i8> %vecinit2
+}
+
+declare i8 @llvm.ctlz.i8(i8, i1) #3
+
+attributes #0 = { alwaysinline nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
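Note: the hunk paths pair llvm-3.3.src/ (pristine) against llvm-3.3/
(patched), so a -p1 application from inside an unpacked LLVM 3.3 source tree
would look like the sketch below; the directory layout is an assumption, and
in a normal Julia v0.4 build the deps makefiles apply this patch
automatically:

cd llvm-3.3.src
patch -p1 < ../llvm-3.3.patch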