// Slang `core` library

// Aliases for base types
typedef half float16_t;
typedef float float32_t;
typedef double float64_t;
typedef int int32_t;
typedef uint uint32_t;
typedef uintptr_t size_t;
typedef uintptr_t usize_t; // FIX: missing `;` previously fused this with the next typedef
typedef intptr_t ssize_t;

// Modifier for variables that must resolve to compile-time constants
// as part of translation.
syntax constexpr : ConstExprModifier;

// Modifier for variables that should have writes be made
// visible at the global-memory scope
syntax globallycoherent : GloballyCoherentModifier;

/// Modifier to disable interpolation and force per-vertex passing of a varying attribute.
///
/// When a varying attribute passed to the fragment shader is marked `pervertex`, it will
/// not be interpolated during rasterization (similar to `nointerpolate` attributes).
/// Unlike a plain `nointerpolate` attribute, this modifier indicates that the attribute
/// should *only* be accessed through the `GetAttributeAtVertex()` operation, to access its
/// distinct per-vertex values.
///
syntax pervertex : PerVertexModifier;

/// Modifier to indicate a buffer or texture element type is
/// backed by data in an unsigned normalized format.
///
/// The `unorm` modifier is only valid on `float` and `vector`s
/// with `float` elements.
///
/// This modifier does not affect the semantics of any variable,
/// parameter, or field that uses it. The semantics of a `float`
/// or vector are the same with or without `unorm`.
///
/// The `unorm` modifier can be used for the element type of a
/// buffer or texture, to indicate that the data that is bound
/// to that buffer or texture is in a matching normalized format.
/// Some platforms may require a `unorm` qualifier for such buffers
/// and textures, and others may operate correctly without it.
///
syntax unorm : UNormModifier;

/// Modifier to indicate a buffer or texture element type is
/// backed by data in a signed normalized format.
// NOTE(review): generic parameter lists (`__generic<...>`, `Ptr<T>`, `vector<T,N>`, ...)
// in this region were lost to angle-bracket stripping during extraction; they have been
// reconstructed to match the upstream Slang stdlib — verify against the canonical source.
///
/// The `snorm` modifier is only valid on `float` and `vector`s
/// with `float` elements.
///
/// This modifier does not affect the semantics of any variable,
/// parameter, or field that uses it. The semantics of a `float`
/// or vector are the same with or without `snorm`.
///
/// The `snorm` modifier can be used for the element type of a
/// buffer or texture, to indicate that the data that is bound
/// to that buffer or texture is in a matching normalized format.
/// Some platforms may require a `snorm` qualifier for such buffers
/// and textures, and others may operate correctly without it.
///
syntax snorm : SNormModifier;

/// Modifier to indicate that a function name should not be mangled
/// by the Slang compiler.
///
/// The `__extern_cpp` modifier makes a symbol to have unmangled
/// name in source/output C++ code.
///
syntax __extern_cpp : ExternCppModifier;

/// A type that can be used as an operand for builtins
[sealed]
[builtin]
interface __BuiltinType {}

/// A type that can be used for arithmetic operations
[sealed]
[builtin]
interface __BuiltinArithmeticType : __BuiltinType
{
    /// Initialize from a 32-bit signed integer value.
    __init(int value);

    /// Initialize from the same type.
    __init(This value);
}

/// A type that can be used for logical/bitwise operations
[sealed]
[builtin]
interface __BuiltinLogicalType : __BuiltinType
{
    /// Initialize from a 32-bit signed integer value.
    __init(int value);
}

/// A type that logically has a sign (positive/negative/zero)
[sealed]
[builtin]
interface __BuiltinSignedArithmeticType : __BuiltinArithmeticType {}

/// A type that can represent integers
[sealed]
[builtin]
interface __BuiltinIntegerType : __BuiltinArithmeticType {}

/// A type that can represent non-integers
[sealed]
[builtin]
interface __BuiltinRealType : __BuiltinSignedArithmeticType {}

__attributeTarget(AggTypeDecl)
attribute_syntax [__NonCopyableType] : NonCopyableTypeAttribute;

__attributeTarget(FunctionDeclBase)
attribute_syntax [__NoSideEffect] : NoSideEffectAttribute;

/// Marks a function for forward-mode differentiation.
/// i.e. the compiler will automatically generate a new function
/// that computes the jacobian-vector product of the original.
__attributeTarget(FunctionDeclBase)
attribute_syntax [ForwardDifferentiable] : ForwardDifferentiableAttribute;

/// Marks a function for backward-mode differentiation.
__attributeTarget(FunctionDeclBase)
attribute_syntax [BackwardDifferentiable(order:int = 0)] : BackwardDifferentiableAttribute;

__attributeTarget(FunctionDeclBase)
attribute_syntax [Differentiable(order:int = 0)] : BackwardDifferentiableAttribute;

/// Interface to denote types as differentiable.
/// Allows for user-specified differential types as
/// well as automatic generation, for when the associated type
/// hasn't been declared explicitly.
/// Note that the requirements must currently be defined in this exact order
/// since the auto-diff pass relies on the order to grab the struct keys.
///
__magic_type(DifferentiableType)
interface IDifferentiable
{
    // Note: the compiler implementation requires the `Differential` associated type to be defined
    // before anything else.
    __builtin_requirement($( (int)BuiltinRequirementKind::DifferentialType) )
    associatedtype Differential : IDifferentiable;

    __builtin_requirement($( (int)BuiltinRequirementKind::DZeroFunc) )
    static Differential dzero();

    __builtin_requirement($( (int)BuiltinRequirementKind::DAddFunc) )
    static Differential dadd(Differential, Differential);

    __builtin_requirement($( (int)BuiltinRequirementKind::DMulFunc) )
    __generic<T : __BuiltinRealType>
    static Differential dmul(T, Differential);
};

/// Pair type that serves to wrap the primal and
/// differential types of an arbitrary type T.
__generic<T : IDifferentiable>
__magic_type(DifferentialPairType)
__intrinsic_type($(kIROp_DifferentialPairUserCodeType))
struct DifferentialPair : IDifferentiable
{
    typedef DifferentialPair<T.Differential> Differential;
    typedef T.Differential DifferentialElementType;

    __intrinsic_op($(kIROp_MakeDifferentialPairUserCode))
    __init(T _primal, T.Differential _differential);

    property p : T
    {
        __intrinsic_op($(kIROp_DifferentialPairGetPrimalUserCode))
        get;
    }

    property v : T
    {
        __intrinsic_op($(kIROp_DifferentialPairGetPrimalUserCode))
        get;
    }

    property d : T.Differential
    {
        __intrinsic_op($(kIROp_DifferentialPairGetDifferentialUserCode))
        get;
    }

    [__unsafeForceInlineEarly]
    T.Differential getDifferential() { return d; }

    [__unsafeForceInlineEarly]
    T getPrimal() { return p; }

    [__unsafeForceInlineEarly]
    static Differential dzero()
    {
        return Differential(T.dzero(), T.Differential.dzero());
    }

    [__unsafeForceInlineEarly]
    static Differential dadd(Differential a, Differential b)
    {
        return Differential(
            T.dadd(a.p, b.p),
            T.Differential.dadd(a.d, b.d));
    }

    __generic<U : __BuiltinRealType>
    [__unsafeForceInlineEarly]
    static Differential dmul(U a, Differential b)
    {
        return Differential(
            T.dmul(a, b.p),
            T.Differential.dmul(a, b.d));
    }
};

/// A type that uses a floating-point representation
[sealed]
[builtin]
[TreatAsDifferentiable]
interface __BuiltinFloatingPointType : __BuiltinRealType, IDifferentiable
{
    /// Initialize from a 32-bit floating-point value.
    __init(float value);

    /// Get the value of the mathematical constant pi in this type.
    static This getPi();
}

//@ hidden:

// A type resulting from an `enum` declaration.
[builtin]
__magic_type(EnumTypeType)
interface __EnumType
{
    // The type of tags for this `enum`
    //
    // Note: using `__Tag` instead of `Tag` to avoid any
    // conflict if a user had an `enum` case called `Tag`
    associatedtype __Tag : __BuiltinIntegerType;
};

// Use an extension to declare that every `enum` type
// inherits an initializer based on the tag type.
//
// Note: there is an important and subtle point here.
// If we declared these initializers inside the `interface`
// declaration above, then they would implicitly be
// *requirements* of the `__EnumType` interface, and any
// type that declares conformance to it would need to
// provide implementations. That would put the onus on
// the semantic checker to synthesize such initializers
// when conforming an `enum` type to `__EnumType` (just
// as it currently synthesizes the `__Tag` requirement.
// Putting the declaration in an `extension` makes them
// concrete declarations rather than interface requirements.
// (Admittedly, they are "concrete" declarations with
// no bodies, because currently all initializers are
// assumed to be intrinsics).
//
// TODO: It might be more accurate to express this as:
//
//      __generic<T : __EnumType> extension T { ... }
//
// That alternative would express an extension of every
// type that conforms to `__EnumType`, rather than an
// extension of `__EnumType` itself. The distinction
// is subtle, and unfortunately not one the Slang type
// checker is equipped to handle right now. For now we
// will stick with the syntax that actually works, even
// if it might be the less technically correct one.
//
extension __EnumType
{
    // TODO: this should be a single initializer using
    // the `__Tag` associated type from the `__EnumType`
    // interface, but right now the scoping for looking
    // up that type isn't working right.
    //
    __intrinsic_op($(kIROp_IntCast))
    __init(int value);

    __intrinsic_op($(kIROp_IntCast))
    __init(uint value);
}

// A type resulting from an `enum` declaration
// with the `[flags]` attribute.
[builtin]
interface __FlagsEnumType : __EnumType
{
};

__generic<T, let N : int>
__magic_type(ArrayExpressionType)
struct Array
{
}

// The "comma operator" is effectively just a generic function that returns its second
// argument. The left-to-right evaluation order guaranteed by Slang then ensures that
// `left` is evaluated before `right`.
//
__generic<T, U>
[__unsafeForceInlineEarly]
U operator,(T left, U right)
{
    return right;
}

// The ternary `?:` operator does not short-circuit in HLSL, and Slang no longer
// follow that definition for the scalar condition overload, so this declaration just serves
// for type-checking purpose only.
__generic<T>
__intrinsic_op(select)
T operator?:(bool condition, T ifTrue, T ifFalse);

__generic<T, let N : int>
__intrinsic_op(select)
vector<T, N> operator?:(vector<bool, N> condition, vector<T, N> ifTrue, vector<T, N> ifFalse);

// Users are advised to use `select` instead if non-short-circuiting behavior is intended.
__generic<T>
__intrinsic_op(select)
T select(bool condition, T ifTrue, T ifFalse);

__generic<T, let N : int>
__intrinsic_op(select)
vector<T, N> select(vector<bool, N> condition, vector<T, N> ifTrue, vector<T, N> ifFalse);

// Allow real-number types to be cast into each other
__generic<T : __BuiltinRealType, U : __BuiltinRealType>
__intrinsic_op($(kIROp_FloatCast))
T __realCast(U val);

${{{{
// We are going to use code generation to produce the
// declarations for all of our base types.

static const int kBaseTypeCount = sizeof(kBaseTypes) / sizeof(kBaseTypes[0]);
for (int tt = 0; tt < kBaseTypeCount; ++tt)
{
}}}}

__builtin_type($(int(kBaseTypes[tt].tag)))
struct $(kBaseTypes[tt].name)
    : __BuiltinType
${{{{
    switch (kBaseTypes[tt].tag)
    {
    case BaseType::Half:
    case BaseType::Float:
    case BaseType::Double:
}}}}
    , __BuiltinFloatingPointType
    , __BuiltinRealType
    , __BuiltinSignedArithmeticType
    , __BuiltinArithmeticType
${{{{
        break;

    case BaseType::Int8:
    case BaseType::Int16:
    case BaseType::Int:
    case BaseType::Int64:
    case BaseType::IntPtr:
}}}}
    , __BuiltinSignedArithmeticType
${{{{
        ; // fall through
    case BaseType::UInt8:
    case BaseType::UInt16:
    case BaseType::UInt:
    case BaseType::UInt64:
    case BaseType::UIntPtr:
}}}}
    , __BuiltinArithmeticType
    , __BuiltinIntegerType
${{{{
        ; // fall through
    case BaseType::Bool:
}}}}
    , __BuiltinLogicalType
${{{{
        break;

    default:
        break;
    }
}}}}
{
${{{{
    // Declare initializers to convert from various other types
    for (int ss = 0; ss < kBaseTypeCount; ++ss)
    {
        // Don't allow conversion to or from `void`
        if (kBaseTypes[tt].tag == BaseType::Void)
            continue;
        if (kBaseTypes[ss].tag == BaseType::Void)
            continue;

        // We need to emit a modifier so that the semantic-checking
        // layer will know it can use these operations for implicit
        // conversion.
        ConversionCost conversionCost = getBaseTypeConversionCost(
            kBaseTypes[tt],
            kBaseTypes[ss]);
        IROp intrinsicOpCode = getBaseTypeConversionOp(
            kBaseTypes[tt],
            kBaseTypes[ss]);

        BuiltinConversionKind builtinConversionKind = kBuiltinConversion_Unknown;
        if (kBaseTypes[tt].tag == BaseType::Double && kBaseTypes[ss].tag == BaseType::Float)
            builtinConversionKind = kBuiltinConversion_FloatToDouble;
}}}}
    __intrinsic_op($(intrinsicOpCode))
    __implicit_conversion($(conversionCost), $(builtinConversionKind))
    __init($(kBaseTypes[ss].name) value);
${{{{
    }

    // If this is a basic integer type, then define explicit
    // initializers that take a value of an `enum` type.
    //
    // TODO: This should actually be restricted, so that this
    // only applies `where T.__Tag == Self`, but we don't have
    // the needed features in our type system to implement
    // that constraint right now.
    //
    switch (kBaseTypes[tt].tag)
    {
        // TODO: should this cover the full gamut of integer types?
    case BaseType::Int:
    case BaseType::UInt:
}}}}
    __generic<T : __EnumType>
    __intrinsic_op($(kIROp_IntCast))
    __init(T value);
${{{{
        break;

    default:
        break;
    }

    // If this is a floating-point type, then we need to
    // define the basic `getPi()` function that is used
    // to implement generic versions of `degrees()` and
    // `radians()`.
    //
    switch (kBaseTypes[tt].tag)
    {
    default:
        break;

    case BaseType::Half:
    case BaseType::Float:
    case BaseType::Double:
}}}}
    static $(kBaseTypes[tt].name) getPi() { return $(kBaseTypes[tt].name)(3.14159265358979323846264338328); }

    typedef $(kBaseTypes[tt].name) Differential;

    [__unsafeForceInlineEarly]
    [BackwardDifferentiable]
    static Differential dzero()
    {
        return Differential(0);
    }

    [__unsafeForceInlineEarly]
    [BackwardDifferentiable]
    static Differential dadd(Differential a, Differential b)
    {
        return a + b;
    }

    __generic<U : __BuiltinRealType>
    [__unsafeForceInlineEarly]
    [BackwardDifferentiable]
    static Differential dmul(U a, Differential b)
    {
        return __realCast(a) * b;
    }
${{{{
        break;
    }

    // If this is the `void` type, then we want to allow
    // explicit conversion to it from any other type, using
    // `(void) someExpression`.
    //
    if( kBaseTypes[tt].tag == BaseType::Void )
    {
}}}}
    __generic<T>
    [__readNone]
    __intrinsic_op($(kIROp_CastToVoid))
    __init(T value)
    {}
${{{{
    }
}}}}
}
${{{{
}

// Declare built-in pointer type
// (eventually we can have the traditional syntax sugar for this)
}}}}

__magic_type(NullPtrType)
struct NullPtr
{
};

__magic_type(NoneType)
__intrinsic_type($(kIROp_VoidType))
struct __none_t
{
};

__generic<T>
__magic_type(PtrType)
__intrinsic_type($(kIROp_PtrType))
struct Ptr
{
    __generic<U>
    __intrinsic_op($(kIROp_BitCast))
    __init(Ptr<U> ptr);

    __intrinsic_op($(kIROp_CastIntToPtr))
    __init(uint64_t val);

    __intrinsic_op($(kIROp_CastIntToPtr))
    __init(int64_t val);

    __subscript(int index) -> T
    {
        [__unsafeForceInlineEarly]
        get { return __load(__getElementPtr(this, index)); }

        [__unsafeForceInlineEarly]
        set(T newValue) { __store(__getElementPtr(this, index), newValue); }

        __intrinsic_op($(kIROp_GetElementPtr))
        ref;
    }
};

__generic<T>
__intrinsic_op($(kIROp_Load))
T __load(Ptr<T> ptr);

__generic<T>
__intrinsic_op($(kIROp_Store))
void __store(Ptr<T> ptr, T val);

__generic<T>
__intrinsic_op($(kIROp_GetElementPtr))
Ptr<T> __getElementPtr(Ptr<T> ptr, int index);

__generic<T>
__intrinsic_op($(kIROp_GetElementPtr))
Ptr<T> __getElementPtr(Ptr<T> ptr, int64_t index);

__generic<T1, T2>
__intrinsic_op($(kIROp_Less))
bool operator<(Ptr<T1> p1, Ptr<T2> p2);

__generic<T1, T2>
__intrinsic_op($(kIROp_Leq))
bool operator<=(Ptr<T1> p1, Ptr<T2> p2);

__generic<T1, T2>
__intrinsic_op($(kIROp_Greater))
bool operator>(Ptr<T1> p1, Ptr<T2> p2);

__generic<T1, T2>
__intrinsic_op($(kIROp_Geq))
bool operator>=(Ptr<T1> p1, Ptr<T2> p2);

__generic<T1, T2>
__intrinsic_op($(kIROp_Neq))
bool operator!=(Ptr<T1> p1, Ptr<T2> p2);

__generic<T1, T2>
__intrinsic_op($(kIROp_Eql))
bool operator==(Ptr<T1> p1, Ptr<T2> p2);

extension bool
{
    __generic<T>
    __implicit_conversion($(kConversionCost_PtrToBool))
    __intrinsic_op($(kIROp_CastPtrToBool))
    __init(Ptr<T> ptr);

    static const bool maxValue = true;
    static const bool minValue = false;
}

extension uint64_t
{
    __generic<T>
    __intrinsic_op($(kIROp_CastPtrToInt))
    __init(Ptr<T> ptr);

    static const uint64_t maxValue = 0xFFFFFFFFFFFFFFFFULL;
    static const uint64_t minValue = 0;
}
extension int64_t { __generic __intrinsic_op($(kIROp_CastPtrToInt)) __init(Ptr ptr); static const int64_t maxValue = 0x7FFFFFFFFFFFFFFFLL; static const int64_t minValue = -0x8000000000000000LL; } extension intptr_t { __generic __intrinsic_op($(kIROp_CastPtrToInt)) __init(Ptr ptr); static const intptr_t maxValue = $(SLANG_PROCESSOR_X86_64?"0x7FFFFFFFFFFFFFFFz":"0x7FFFFFFFz"); static const intptr_t minValue = $(SLANG_PROCESSOR_X86_64?"0x8000000000000000z":"0x80000000z"); static const int size = $(SLANG_PROCESSOR_X86_64?"8":"4"); } extension uintptr_t { __generic __intrinsic_op($(kIROp_CastPtrToInt)) __init(Ptr ptr); static const uintptr_t maxValue = $(SLANG_PROCESSOR_X86_64?"0xFFFFFFFFFFFFFFFFz":"0xFFFFFFFFz"); static const uintptr_t minValue = 0z; static const int size = $(SLANG_PROCESSOR_X86_64?"8":"4"); } __generic __magic_type(OutType) __intrinsic_type($(kIROp_OutType)) struct Out {}; __generic __magic_type(InOutType) __intrinsic_type($(kIROp_InOutType)) struct InOut {}; __generic __magic_type(RefType) __intrinsic_type($(kIROp_RefType)) struct Ref {}; __generic __magic_type(OptionalType) __intrinsic_type($(kIROp_OptionalType)) struct Optional { property bool hasValue { __intrinsic_op($(kIROp_OptionalHasValue)) get; } property T value { __intrinsic_op($(kIROp_GetOptionalValue)) get; } __implicit_conversion($(kConversionCost_ValToOptional)) __intrinsic_op($(kIROp_MakeOptionalValue)) __init(T val); }; __generic [__unsafeForceInlineEarly] bool operator==(Optional val, __none_t noneVal) { return !val.hasValue; } __generic [__unsafeForceInlineEarly] bool operator!=(Optional val, __none_t noneVal) { return val.hasValue; } __generic [__unsafeForceInlineEarly] bool operator==(__none_t noneVal, Optional val) { return !val.hasValue; } __generic [__unsafeForceInlineEarly] bool operator!=(__none_t noneVal, Optional val) { return val.hasValue; } __generic __magic_type(NativeRefType) __intrinsic_type($(kIROp_NativePtrType)) struct NativeRef { 
__intrinsic_op($(kIROp_GetNativePtr)) __init(T val); }; __generic __intrinsic_op($(kIROp_ManagedPtrAttach)) void __managed_ptr_attach(__ref T val, NativeRef nativeVal); __generic [__unsafeForceInlineEarly] T __attachToNativeRef(NativeRef nativeVal) { T result; __managed_ptr_attach(result, nativeVal); return result; } __magic_type(StringType) __intrinsic_type($(kIROp_StringType)) struct String { __target_intrinsic(cpp) __intrinsic_op($(kIROp_MakeString)) __init(int val); __target_intrinsic(cpp) __intrinsic_op($(kIROp_MakeString)) __init(uint val); __target_intrinsic(cpp) __intrinsic_op($(kIROp_MakeString)) __init(int64_t val); __target_intrinsic(cpp) __intrinsic_op($(kIROp_MakeString)) __init(uint64_t val); __target_intrinsic(cpp) __intrinsic_op($(kIROp_MakeString)) __init(float val); __target_intrinsic(cpp) __intrinsic_op($(kIROp_MakeString)) __init(double val); __target_intrinsic(cpp) int64_t getLength(); property int length { get { return (int)getLength(); } } }; typedef String string; __magic_type(NativeStringType) __intrinsic_type($(kIROp_NativeStringType)) struct NativeString { __target_intrinsic(cpp, "int(strlen($0))") int getLength(); __target_intrinsic(cpp, "(void*)((const char*)($0))") Ptr getBuffer(); property int length { [__unsafeForceInlineEarly] get{return getLength();} } __intrinsic_op($(kIROp_getNativeStr)) __init(String value); }; extension Ptr { __implicit_conversion($(kConversionCost_PtrToVoidPtr)) [__unsafeForceInlineEarly] __init(NativeString nativeStr) { this = nativeStr.getBuffer(); } __generic __intrinsic_op(0) __implicit_conversion($(kConversionCost_PtrToVoidPtr)) __init(Ptr ptr); __generic __intrinsic_op(0) __implicit_conversion($(kConversionCost_PtrToVoidPtr)) __init(NativeRef ptr); } __magic_type(DynamicType) __intrinsic_type($(kIROp_DynamicType)) struct __Dynamic {}; extension half { static const half maxValue = half(65504); static const half minValue = half(-65504); } extension float { static const float maxValue = 
340282346638528859811704183484516925440.0f; static const float minValue = -340282346638528859811704183484516925440.0f; } extension double { static const double maxValue = 179769313486231570814527423731704356798070567525844996598917476803157260780028538760589558632766878171540458953514382464234321326889464182768467546703537516986049910576551282076245490090389328944075868508455133942304583236903222948165808559332123348274797826204144723168738177180919299881250404026184124858368.0; static const double minValue = -179769313486231570814527423731704356798070567525844996598917476803157260780028538760589558632766878171540458953514382464234321326889464182768467546703537516986049910576551282076245490090389328944075868508455133942304583236903222948165808559332123348274797826204144723168738177180919299881250404026184124858368.0; } extension int { static const int maxValue = 2147483647; static const int minValue = -2147483648; } extension uint { static const uint maxValue = 4294967295; static const uint minValue = 0; } extension int8_t { static const int8_t maxValue = 127; static const int8_t minValue = -128; } extension uint8_t { static const uint8_t maxValue = 255; static const uint8_t minValue = 0; } extension uint16_t { static const uint16_t maxValue = 65535; static const uint16_t minValue = 0; } extension int16_t { static const int16_t maxValue = 32767; static const int16_t minValue = -32768; } /// An `N` component vector with elements of type `T`. __generic __magic_type(VectorExpressionType) struct vector { /// The element type of the vector typedef T Element; /// Initialize a vector where all elements have the same scalar `value`. 
__implicit_conversion($(kConversionCost_ScalarToVector)) __intrinsic_op($(kIROp_MakeVectorFromScalar)) __init(T value); /// Initialize a vector from a value of the same type // TODO: we should revise semantic checking so this kind of "identity" conversion is not required __intrinsic_op(0) __init(vector value); } const int kRowMajorMatrixLayout = $(SLANG_MATRIX_LAYOUT_ROW_MAJOR); const int kColumnMajorMatrixLayout = $(SLANG_MATRIX_LAYOUT_COLUMN_MAJOR); /// A matrix with `R` rows and `C` columns, with elements of type `T`. __generic __magic_type(MatrixExpressionType) struct matrix { __intrinsic_op($(kIROp_MakeMatrixFromScalar)) __init(T val); } ${{{{ static const struct { char const* name; char const* glslPrefix; } kTypes[] = { {"half", "f16"}, {"float", ""}, {"double", "d"}, {"float16_t", "f16"}, {"float32_t", "f32"}, {"float64_t", "f64"}, {"int8_t", "i8"}, {"int16_t", "i16"}, {"int32_t", "i32"}, {"int", "i"}, {"int64_t", "i64"}, {"uint8_t", "u8"}, {"uint16_t", "u16"}, {"uint32_t", "u32"}, {"uint", "u"}, {"uint64_t", "u64"}, {"bool", "b"}, }; static const int kTypeCount = sizeof(kTypes) / sizeof(kTypes[0]); for (int tt = 0; tt < kTypeCount; ++tt) { // Declare HLSL vector types for (int ii = 1; ii <= 4; ++ii) { sb << "typedef vector<" << kTypes[tt].name << "," << ii << "> " << kTypes[tt].name << ii << ";\n"; } // Declare HLSL matrix types for (int rr = 2; rr <= 4; ++rr) for (int cc = 2; cc <= 4; ++cc) { sb << "typedef matrix<" << kTypes[tt].name << "," << rr << "," << cc << "> " << kTypes[tt].name << rr << "x" << cc << ";\n"; } } // Declare additional built-in generic types }}}} //@ public: __generic __intrinsic_type($(kIROp_ConstantBufferType)) __magic_type(ConstantBufferType) struct ConstantBuffer {} __generic __intrinsic_type($(kIROp_TextureBufferType)) __magic_type(TextureBufferType) struct TextureBuffer {} __generic __intrinsic_type($(kIROp_ParameterBlockType)) __magic_type(ParameterBlockType) struct ParameterBlock {} __generic __magic_type(VerticesType) 
__intrinsic_type($(kIROp_VerticesType)) struct Vertices { __subscript(uint index) -> T { // TODO: Ellie make sure these remains write only __intrinsic_op($(kIROp_GetElementPtr)) ref; } }; __generic __magic_type(IndicesType) __intrinsic_type($(kIROp_IndicesType)) struct Indices { __subscript(uint index) -> T { // TODO: Ellie: It's illegal to not write out the whole primitive at once, should we use set over ref? __intrinsic_op($(kIROp_GetElementPtr)) ref; } }; __generic __magic_type(PrimitivesType) __intrinsic_type($(kIROp_PrimitivesType)) struct Primitives { __subscript(uint index) -> T { __intrinsic_op($(kIROp_GetElementPtr)) ref; } }; //@ hidden: // Need to add constructors to the types above __generic __extension vector { __intrinsic_op($(kIROp_MakeVector)) __init(T x, T y); } __generic __extension vector { __intrinsic_op($(kIROp_MakeVector)) __init(T x, T y, T z); [__unsafeForceInlineEarly] __intrinsic_op($(kIROp_MakeVector)) __init(vector xy, T z); [__unsafeForceInlineEarly] __intrinsic_op($(kIROp_MakeVector)) __init(T x, vector yz); } __generic __extension vector { __intrinsic_op($(kIROp_MakeVector)) __init(T x, T y, T z, T w); [__unsafeForceInlineEarly] __intrinsic_op($(kIROp_MakeVector)) __init(vector xy, T z, T w); [__unsafeForceInlineEarly] __intrinsic_op($(kIROp_MakeVector)) __init(T x, vector yz, T w); [__unsafeForceInlineEarly] __intrinsic_op($(kIROp_MakeVector)) __init(T x, T y, vector zw); [__unsafeForceInlineEarly] __intrinsic_op($(kIROp_MakeVector)) __init(vector xy, vector zw); [__unsafeForceInlineEarly] __intrinsic_op($(kIROp_MakeVector)) __init(vector xyz, T w); [__unsafeForceInlineEarly] __intrinsic_op($(kIROp_MakeVector)) __init(T x, vector yzw); } ${{{{ // The above extensions are generic in the *type* of the vector, // but explicit in the *size*. We will now declare an extension // for each builtin type that is generic in the size. 
// for (int tt = 0; tt < kBaseTypeCount; ++tt) { if(kBaseTypes[tt].tag == BaseType::Void) continue; sb << "__generic __extension vector<" << kBaseTypes[tt].name << ",N>\n{\n"; for (int ff = 0; ff < kBaseTypeCount; ++ff) { if(kBaseTypes[ff].tag == BaseType::Void) continue; if( tt != ff ) { auto cost = getBaseTypeConversionCost( kBaseTypes[tt], kBaseTypes[ff]); auto op = getBaseTypeConversionOp( kBaseTypes[tt], kBaseTypes[ff]); // Implicit conversion from a vector of the same // size, but different element type. sb << " __implicit_conversion(" << cost << ")\n"; sb << " __intrinsic_op(" << int(op) << ")\n"; sb << " __init(vector<" << kBaseTypes[ff].name << ",N> value);\n"; // Constructor to make a vector from a scalar of another type. if (cost != kConversionCost_Impossible) { cost += kConversionCost_ScalarToVector; sb << " __implicit_conversion(" << cost << ")\n"; sb << " [__unsafeForceInlineEarly]\n"; sb << " __init(" << kBaseTypes[ff].name << " value) { this = vector<" << kBaseTypes[tt].name << ",N>( " << kBaseTypes[tt].name << "(value)); }\n"; } } } sb << "}\n"; } for( int R = 2; R <= 4; ++R ) for( int C = 2; C <= 4; ++C ) { sb << "__generic __extension matrix\n{\n"; // initialize from R*C scalars sb << "__intrinsic_op(" << int(kIROp_MakeMatrix) << ") __init("; for( int ii = 0; ii < R; ++ii ) for( int jj = 0; jj < C; ++jj ) { if ((ii+jj) != 0) sb << ", "; sb << "T m" << ii << jj; } sb << ");\n"; // Initialize from R C-vectors sb << "__intrinsic_op(" << int(kIROp_MakeMatrix) << ") __init("; for (int ii = 0; ii < R; ++ii) { if(ii != 0) sb << ", "; sb << "vector row" << ii; } sb << ");\n"; // initialize from a matrix of larger size for(int rr = R; rr <= 4; ++rr) for( int cc = C; cc <= 4; ++cc ) { if(rr == R && cc == C) continue; sb << "__intrinsic_op(" << int(kIROp_MatrixReshape) << ") __init(matrix value);\n"; } sb << "}\n"; } for (int tt = 0; tt < kBaseTypeCount; ++tt) { if(kBaseTypes[tt].tag == BaseType::Void) continue; auto toType = kBaseTypes[tt].name; }}}} 
__generic extension matrix<$(toType),R,C> { ${{{{ for (int ff = 0; ff < kBaseTypeCount; ++ff) { if(kBaseTypes[ff].tag == BaseType::Void) continue; if( tt == ff ) continue; auto cost = getBaseTypeConversionCost( kBaseTypes[tt], kBaseTypes[ff]); auto fromType = kBaseTypes[ff].name; auto op = getBaseTypeConversionOp( kBaseTypes[tt], kBaseTypes[ff]); }}}} __implicit_conversion($(cost)) __intrinsic_op($(op)) __init(matrix<$(fromType),R,C> value); ${{{{ } }}}} } ${{{{ } }}}} __generic __intrinsic_op(0) T __slang_noop_cast(U u); __generic extension vector : IDifferentiable { typedef vector Differential; [__unsafeForceInlineEarly] [BackwardDifferentiable] static Differential dzero() { return Differential(__slang_noop_cast(T.dzero())); } [__unsafeForceInlineEarly] [BackwardDifferentiable] static Differential dadd(Differential a, Differential b) { return a + b; } __generic [__unsafeForceInlineEarly] [BackwardDifferentiable] static Differential dmul(U a, Differential b) { return __realCast(a) * b; } } __generic extension matrix : IDifferentiable { typedef matrix Differential; [__unsafeForceInlineEarly] [BackwardDifferentiable] static Differential dzero() { return matrix(__slang_noop_cast(T.dzero())); } [__unsafeForceInlineEarly] [BackwardDifferentiable] static Differential dadd(Differential a, Differential b) { return a + b; } __generic [__unsafeForceInlineEarly] [BackwardDifferentiable] static Differential dmul(U a, Differential b) { return __realCast(a) * b; } } //@ public: /// Sampling state for filtered texture fetches. __magic_type(SamplerStateType, $(int(SamplerStateFlavor::SamplerState))) __intrinsic_type($(kIROp_SamplerStateType)) struct SamplerState { } /// Sampling state for filtered texture fetches that include a comparison operation before filtering. 
__magic_type(SamplerStateType, $(int(SamplerStateFlavor::SamplerComparisonState))) __intrinsic_type($(kIROp_SamplerComparisonStateType)) struct SamplerComparisonState { } ${{{{ for(auto& prefixInfo : kTexturePrefixes) for(auto& shapeInfo : kBaseTextureShapes) for(int isArray = 0; isArray < 2; ++isArray) for(int isMultisample = 0; isMultisample < 2; ++isMultisample) for(auto& accessInfo : kBaseTextureAccessLevels) { TextureTypeInfo info(prefixInfo, shapeInfo, isArray, isMultisample, accessInfo, sb, path); info.emitTypeDecl(); } }}}} //@ hidden: ${{{{ for (auto op : intrinsicUnaryOps) { for (auto type : kBaseTypes) { if ((type.flags & op.flags) == 0) continue; char const* resultType = type.name; if (op.flags & BOOL_RESULT) resultType = "bool"; // scalar version sb << "__prefix __intrinsic_op(" << int(op.opCode) << ") " << resultType << " operator" << op.opName << "(" << type.name << " value);\n"; sb << "__intrinsic_op(" << int(op.opCode) << ") " << resultType << " __" << op.funcName << "(" << type.name << " value);\n"; // vector version sb << "__generic "; sb << "__prefix __intrinsic_op(" << int(op.opCode) << ") vector<" << resultType << ",N> operator" << op.opName << "(" << "vector<" << type.name << ",N> value);\n"; // matrix version sb << "__generic "; sb << "__prefix __intrinsic_op(" << int(op.opCode) << ") matrix<" << resultType << ",N,M> operator" << op.opName << "(" << "matrix<" << type.name << ",N,M> value);\n"; } // Synthesize generic versions if(op.interface) { char const* resultType = "T"; if (op.flags & BOOL_RESULT) resultType = "bool"; // scalar version sb << "__generic\n"; sb << "__prefix __intrinsic_op(" << int(op.opCode) << ") " << resultType << " operator" << op.opName << "(" << "T value);\n"; // vector version sb << "__generic "; sb << "__prefix __intrinsic_op(" << int(op.opCode) << ") vector<" << resultType << ",N> operator" << op.opName << "(vector value);\n"; // matrix version sb << "__generic "; sb << "__prefix __intrinsic_op(" << int(op.opCode) 
// (Tail of the generated prefix-operator emission loop; the loop header and the
// `${{{{` that opened this C++ generator block appear before this chunk.)
<< ") matrix<" << resultType << ",N,M> operator" << op.opName << "(matrix value);\n";
}
}
}}}}

// Pointer dereference (`*p`) and address-of (`&v`), lowered directly in the IR.
__generic __intrinsic_op(0) __prefix Ref operator*(Ptr value);
__generic __intrinsic_op(0) __prefix Ptr operator&(__ref T value);

// Pointer arithmetic: `p + offset` maps to the IR `getElementPtr` op, and
// `p - offset` is defined in terms of it with a negated offset.
__generic __intrinsic_op($(kIROp_GetElementPtr)) Ptr operator+(Ptr value, int64_t offset);
__generic [__unsafeForceInlineEarly] Ptr operator-(Ptr value, int64_t offset) { return __getElementPtr(value, -offset); }

// Unary `+` is the identity on scalars, vectors, and matrices.
__generic [__unsafeForceInlineEarly] __prefix T operator+(T value) { return value; }
__generic [__unsafeForceInlineEarly] __prefix vector operator+(vector value) { return value; }
__generic [__unsafeForceInlineEarly] __prefix matrix operator+(matrix value) { return value; }

// Synthesize prefix and postfix `++`/`--` for scalar, vector, matrix, and
// pointer operands, each expressed via the corresponding binary `+`/`-`.
// The postfix forms save the original value and return it after the update.
${{{{
static const struct IncDecOpInfo { char const* name; char const* binOp; } kIncDecOps[] = { { "++", "+" }, { "--", "-" }, };
static const struct IncDecOpFixity { char const* qual; char const* bodyPrefix; char const* returnVal; } kIncDecFixities[] = { { "__prefix", "", "value" }, { "__postfix", " let result = value;", "result" }, };
for(auto op : kIncDecOps)
for(auto fixity : kIncDecFixities)
{
}}}}
$(fixity.qual) __generic [__unsafeForceInlineEarly] T operator$(op.name)(in out T value) {$(fixity.bodyPrefix) value = value $(op.binOp) T(1); return $(fixity.returnVal); }
$(fixity.qual) __generic [__unsafeForceInlineEarly] vector operator$(op.name)(in out vector value) {$(fixity.bodyPrefix) value = value $(op.binOp) T(1); return $(fixity.returnVal); }
$(fixity.qual) __generic [__unsafeForceInlineEarly] matrix operator$(op.name)(in out matrix value) {$(fixity.bodyPrefix) value = value $(op.binOp) T(1); return $(fixity.returnVal); }
$(fixity.qual) __generic [__unsafeForceInlineEarly] Ptr operator$(op.name)(in out Ptr value) {$(fixity.bodyPrefix) value = value $(op.binOp) 1; return $(fixity.returnVal); }
${{{{
}

// Emit concrete overloads of each intrinsic binary operator for every base
// type that supports it: scalar, vector, matrix, plus the mixed scalar/vector
// and scalar/matrix forms; then interface-constrained generic versions.
for (auto op : intrinsicBinaryOps)
{
for (auto type : kBaseTypes)
{
if ((type.flags & op.flags) == 0)
continue;

char const* leftType = type.name;
char const* rightType = leftType;
char const* resultType = leftType;
if (op.flags & BOOL_RESULT) resultType = "bool";

// TODO: We should handle a `SHIFT` flag on the op
// by changing `rightType` to `int` in order to
// account for the fact that the shift amount should
// always have a fixed type independent of the LHS.
//
// (It is unclear why this change hadn't been made
// already, so it is possible that such a change
// breaks overload resolution or other parts of
// the compiler)

// scalar version
sb << "__intrinsic_op(" << int(op.opCode) << ") " << resultType << " operator" << op.opName << "(" << leftType << " left, " << rightType << " right);\n";
sb << "__intrinsic_op(" << int(op.opCode) << ") " << resultType << " __" << op.funcName << "(" << leftType << " left, " << rightType << " right);\n";

// vector version
sb << "__generic ";
sb << "__intrinsic_op(" << int(op.opCode) << ") vector<" << resultType << ",N> operator" << op.opName << "(vector<" << leftType << ",N> left, vector<" << rightType << ",N> right);\n";

// matrix version
sb << "__generic ";
sb << "__intrinsic_op(" << int(op.opCode) << ") matrix<" << resultType << ",N,M> operator" << op.opName << "(matrix<" << leftType << ",N,M> left, matrix<" << rightType << ",N,M> right);\n";

// We currently synthesize additional overloads
// for the case where one or the other operand
// is a scalar. This choice serves a few purposes:
//
// 1. It avoids introducing scalar-to-vector or
// scalar-to-matrix promotions before the operator,
// which might allow some back ends to produce
// more optimal code.
//
// 2. It avoids concerns about making overload resolution
// and the inference rules for `N` and `M` able to
// handle the mixed vector/scalar or matrix/scalar case.
//
// 3. Having explicit overloads for the matrix/scalar cases
// here means that we do *not* need to support a general
// implicit conversion from scalars to matrices, unless
// we decide we want to.
//
// Note: Case (2) of the motivation shouldn't really apply
// any more, because we end up having to support similar
// interference for built-in binary math functions where
// vectors and scalars might be combined (and where defining
// additional overloads to cover all the combinations doesn't
// seem practical or desirable).
//
// TODO: We should consider whether dropping these extra
// overloads is possible and worth it. The optimization
// concern (1) could possibly be addressed in specific
// back-ends. The issue (3) about not wanting to support
// implicit scalar-to-matrix conversion may be moot if
// we end up needing to support mixed scalar/matrix input
// for builtin in non-operator functions anyway.

// scalar-vector and scalar-matrix
sb << "__generic ";
sb << "__intrinsic_op(" << int(op.opCode) << ") vector<" << resultType << ",N> operator" << op.opName << "(" << leftType << " left, vector<" << rightType << ",N> right);\n";
sb << "__generic ";
sb << "__intrinsic_op(" << int(op.opCode) << ") matrix<" << resultType << ",N,M> operator" << op.opName << "(" << leftType << " left, matrix<" << rightType << ",N,M> right);\n";

// vector-scalar and matrix-scalar
sb << "__generic ";
sb << "__intrinsic_op(" << int(op.opCode) << ") vector<" << resultType << ",N> operator" << op.opName << "(vector<" << leftType << ",N> left, " << rightType << " right);\n";
sb << "__generic ";
sb << "__intrinsic_op(" << int(op.opCode) << ") matrix<" << resultType << ",N,M> operator" << op.opName << "(matrix<" << leftType << ",N,M> left, " << rightType << " right);\n";
}

// Synthesize generic versions
if(op.interface)
{
char const* leftType = "T";
char const* rightType = leftType;
char const* resultType = leftType;
if (op.flags & BOOL_RESULT) resultType = "bool";

// TODO: handle `SHIFT`

// scalar version
sb << "__generic\n";
sb << "__intrinsic_op(" << int(op.opCode) << ") " << resultType << " operator" << op.opName << "(" << leftType << " left, " << rightType << " right);\n";

// vector version
sb << "__generic ";
sb << "__intrinsic_op(" << int(op.opCode) << ") vector<" << resultType << ",N> operator" << op.opName << "(vector<" << leftType << ",N> left, vector<" << rightType << ",N> right);\n";

// matrix version
sb << "__generic ";
sb << "__intrinsic_op(" << int(op.opCode) << ") matrix<" << resultType << ",N,M> operator" << op.opName << "(matrix<" << leftType << ",N,M> left, matrix<" << rightType << ",N,M> right);\n";

// scalar-vector and scalar-matrix
sb << "__generic ";
sb << "__intrinsic_op(" << int(op.opCode) << ") vector<" << resultType << ",N> operator" << op.opName << "(" << leftType << " left, vector<" << rightType << ",N> right);\n";
sb << "__generic ";
sb << "__intrinsic_op(" << int(op.opCode) << ") matrix<" << resultType << ",N,M> operator" << op.opName << "(" << leftType << " left, matrix<" << rightType << ",N,M> right);\n";

// vector-scalar and matrix-scalar
sb << "__generic ";
sb << "__intrinsic_op(" << int(op.opCode) << ") vector<" << resultType << ",N> operator" << op.opName << "(vector<" << leftType << ",N> left, " << rightType << " right);\n";
sb << "__generic ";
sb << "__intrinsic_op(" << int(op.opCode) << ") matrix<" << resultType << ",N,M> operator" << op.opName << "(matrix<" << leftType << ",N,M> left, " << rightType << " right);\n";
}
}

// We will declare the shift operations entirely as generics
// rather than try to handle all the pairings of left-hand
// and right-hand side types.
//
// Shift operators (`<<`, `>>`) and their compound-assignment forms, emitted
// generically over left/right operand types rather than per concrete type.
static const struct ShiftOpInfo
{
char const* name;
char const* funcName;
int op;
} kShiftOps[] = {
{ "<<", "shl", kIROp_Lsh },
{ ">>", "shr", kIROp_Rsh },
};
for(auto info : kShiftOps)
{
}}}}
__generic __intrinsic_op($(info.op)) L operator$(info.name)(L left, R right);
__generic __intrinsic_op($(info.op)) L __$(info.funcName)(L left, R right);
__generic [__unsafeForceInlineEarly] L operator$(info.name)=(in out L left, R right) { left = left $(info.name) right; return left; }
__generic __intrinsic_op($(info.op)) vector operator$(info.name)(vector left, vector right);
__generic [__unsafeForceInlineEarly] vector operator$(info.name)=(in out vector left, vector right) { left = left $(info.name) right; return left; }
__generic __intrinsic_op($(info.op)) matrix operator$(info.name)(matrix left, matrix right);
__generic [__unsafeForceInlineEarly] matrix operator$(info.name)=(in out matrix left, matrix right) { left = left $(info.name) right; return left; }
__generic __intrinsic_op($(info.op)) vector operator$(info.name)(L left, vector right);
__generic __intrinsic_op($(info.op)) matrix operator$(info.name)(L left, matrix right);
__generic __intrinsic_op($(info.op)) vector operator$(info.name)(vector left, R right);
__generic [__unsafeForceInlineEarly] vector operator$(info.name)=(in out vector left, R right) { left = left $(info.name) right; return left; }
__generic __intrinsic_op($(info.op)) matrix operator$(info.name)(matrix left, R right);
__generic [__unsafeForceInlineEarly] matrix operator$(info.name)=(in out matrix left, R right) { left = left $(info.name) right; return left; }
${{{{
}

// Compound assignment operators (`a op= b`), synthesized for each binary op
// over the interface that provides the underlying binary operator.
// Note `%` appears twice so both integer and floating-point types get it.
static const struct CompoundBinaryOpInfo
{
char const* name;
char const* interface;
} kCompoundBinaryOps[] = {
{ "+", "__BuiltinArithmeticType" },
{ "-", "__BuiltinArithmeticType" },
{ "*", "__BuiltinArithmeticType" },
{ "/", "__BuiltinArithmeticType" },
{ "%", "__BuiltinIntegerType" },
{ "%", "__BuiltinFloatingPointType" },
{ "&", "__BuiltinLogicalType" },
{ "|", "__BuiltinLogicalType" },
{ "^", "__BuiltinLogicalType" },
};
for( auto op : kCompoundBinaryOps )
{
}}}}
__generic [__unsafeForceInlineEarly] T operator$(op.name)=(in out T left, T right) { left = left $(op.name) right; return left; }
__generic [__unsafeForceInlineEarly] vector operator$(op.name)=(in out vector left, vector right) { left = left $(op.name) right; return left; }
__generic [__unsafeForceInlineEarly] vector operator$(op.name)=(in out vector left, T right) { left = left $(op.name) right; return left; }
__generic [__unsafeForceInlineEarly] matrix operator$(op.name)=(in out matrix left, matrix right) { left = left $(op.name) right; return left; }
__generic [__unsafeForceInlineEarly] matrix operator$(op.name)=(in out matrix left, T right) { left = left $(op.name) right; return left; }
${{{{
}
}}}}

//@ public:

// Bit cast
__generic [__unsafeForceInlineEarly] __intrinsic_op($(kIROp_BitCast)) T bit_cast(U value);

// Create Existential object
__generic [__unsafeForceInlineEarly] __intrinsic_op($(kIROp_CreateExistentialObject)) T createDynamicObject(uint typeId, U value);

// Reinterpret
__generic [__unsafeForceInlineEarly] __intrinsic_op($(kIROp_Reinterpret)) T reinterpret(U value);

// Use an otherwise unused value
//
// This can be used to silence the warning about returning before initializing
// an out parameter.
__generic [__readNone] [ForceInline] void unused(inout T){}

// Specialized function

/// Given a string returns an integer hash of that string.
__intrinsic_op($(kIROp_GetStringHash)) int getStringHash(String string);

/// Use will produce a syntax error in downstream compiler
/// Useful for testing diagnostics around compilation errors of downstream compiler
/// It 'returns' an int so can be used in expressions without the front end complaining.
__target_intrinsic(hlsl, " @ ")
__target_intrinsic(glsl, " @ ")
__target_intrinsic(cuda, " @ ")
__target_intrinsic(cpp, " @ ")
int __SyntaxError();

/// For downstream compilers that allow sizeof/alignof/offsetof
/// Can't be called in the C/C++ style. Need to use __size_of() as opposed to sizeof(some_type).
__generic __target_intrinsic(cuda, "sizeof($G0)") __target_intrinsic(cpp, "sizeof($G0)") [__readNone] int __sizeOf();
__generic __target_intrinsic(cuda, "sizeof($T0)") __target_intrinsic(cpp, "sizeof($T0)") [__readNone] int __sizeOf(T v);
__generic __target_intrinsic(cuda, "SLANG_ALIGN_OF($G0)") __target_intrinsic(cpp, "SLANG_ALIGN_OF($G0)") [__readNone] int __alignOf();
__generic __target_intrinsic(cuda, "SLANG_ALIGN_OF($T0)") __target_intrinsic(cpp, "SLANG_ALIGN_OF($T0)") [__readNone] int __alignOf(T v);

// It would be nice to have offsetof equivalent, but it's not clear how that would work in terms of the Slang language.
// Here we allow calculating the offset of a field in bytes from an *instance* of the type.
// NOTE(fix): the cpp expansion was missing the closing paren of the outer `int(...)`
// cast, emitting unbalanced parentheses into generated C++; it now matches the cuda form.
__generic __target_intrinsic(cuda, "int(((char*)&($1)) - ((char*)&($0)))") __target_intrinsic(cpp, "int(((char*)&($1)) - ((char*)&($0)))") [__readNone] int __offsetOf(in T t, in F field);

/// Mark beginning of "interlocked" operations in a fragment shader.
__target_intrinsic(glsl, "beginInvocationInterlockARB")
__glsl_extension(GL_ARB_fragment_shader_interlock)
__glsl_version(420)
void beginInvocationInterlock() {}

/// Mark end of "interlocked" operations in a fragment shader.
__target_intrinsic(glsl, "endInvocationInterlockARB")
__glsl_extension(GL_ARB_fragment_shader_interlock)
__glsl_version(420)
void endInvocationInterlock() {}

// Operators to apply to `enum` types

//@ hidden:
__generic __intrinsic_op($(kIROp_Eql)) bool operator==(E left, E right);
__generic __intrinsic_op($(kIROp_Neq)) bool operator!=(E left, E right);

//@ public:

// public interfaces for generic arithmetic types.
/// Interface for types supporting equality and ordering comparisons.
interface IComparable
{
bool equals(This other);
bool lessThan(This other);
bool lessThanOrEquals(This other);
}

__attributeTarget(DeclBase)
attribute_syntax [TreatAsDifferentiable] : TreatAsDifferentiableAttribute;

/// Interface for types supporting the basic arithmetic operations.
[TreatAsDifferentiable]
interface IArithmetic : IComparable
{
This add(This other);
This sub(This other);
This mul(This other);
This div(This other);
This mod(This other);
This neg();
__init(int val);
static const This maxValue;
static const This minValue;
}

/// Interface for integer types: adds shifts, bitwise ops, and conversions.
interface IInteger : IArithmetic
{
This shl(int value);
This shr(int value);
This bitAnd(This other);
This bitOr(This other);
This bitXor(This other);
This bitNot();
int toInt();
int64_t toInt64();
uint toUInt();
uint64_t toUInt64();
}

/// Interface for floating-point types.
interface IFloat : IArithmetic
{
__init(float value);
float toFloat();
}

// Generic operators defined in terms of the interface methods above.
__generic [__unsafeForceInlineEarly] bool operator<(T v0, T v1) { return v0.lessThan(v1); }
__generic [__unsafeForceInlineEarly] bool operator>(T v0, T v1) { return v1.lessThan(v0); }
__generic [__unsafeForceInlineEarly] bool operator ==(T v0, T v1) { return v0.equals(v1); }
// FIX: previously returned `v1.lessThan(v1)`, which is always false.
// `v0 >= v1` is equivalent to `v1 <= v0`, i.e. `v1.lessThanOrEquals(v0)`.
__generic [__unsafeForceInlineEarly] bool operator >=(T v0, T v1) { return v1.lessThanOrEquals(v0); }
__generic [__unsafeForceInlineEarly] bool operator <=(T v0, T v1) { return v0.lessThanOrEquals(v1); }
__generic [__unsafeForceInlineEarly] bool operator !=(T v0, T v1) { return !v0.equals(v1); }
__generic [__unsafeForceInlineEarly] T operator +(T v0, T v1) { return v0.add(v1); }
__generic [__unsafeForceInlineEarly] T operator -(T v0, T v1) { return v0.sub(v1); }
__generic [__unsafeForceInlineEarly] T operator *(T v0, T v1) { return v0.mul(v1); }
__generic [__unsafeForceInlineEarly] T operator /(T v0, T v1) { return v0.div(v1); }
__generic [__unsafeForceInlineEarly] T operator %(T v0, T v1) { return v0.mod(v1); }
__generic [__unsafeForceInlineEarly] __prefix T operator -(T v0) { return v0.neg(); }
__generic [__unsafeForceInlineEarly] T operator &(T v0, T v1) { return v0.bitAnd(v1); }
__generic [__unsafeForceInlineEarly] T operator |(T v0, T v1) { return v0.bitOr(v1); }
__generic [__unsafeForceInlineEarly] T operator ^(T v0, T v1) { return v0.bitXor(v1); }
__generic [__unsafeForceInlineEarly] __prefix T operator ~(T v0) { return v0.bitNot(); }

// IR level type traits.
__generic __intrinsic_op($(kIROp_undefined)) T __declVal();
__generic __intrinsic_op($(kIROp_DefaultConstruct)) T __default();
__generic __intrinsic_op($(kIROp_TypeEquals)) bool __type_equals_impl(T t, U u);
__generic [__unsafeForceInlineEarly] bool __type_equals(T t, U u) { return __type_equals_impl(__declVal(), __declVal()); }
__generic [__unsafeForceInlineEarly] bool __type_equals() { return __type_equals_impl(__declVal(), __declVal()); }
__generic __intrinsic_op($(kIROp_IsBool)) bool __isBool_impl(T t);
__generic [__unsafeForceInlineEarly] bool __isBool() { return __isBool_impl(__declVal()); }
__generic __intrinsic_op($(kIROp_IsInt)) bool __isInt_impl(T t);
__generic [__unsafeForceInlineEarly] bool __isInt() { return __isInt_impl(__declVal()); }
__generic __intrinsic_op($(kIROp_IsFloat)) bool __isFloat_impl(T t);
__generic [__unsafeForceInlineEarly] bool __isFloat() { return __isFloat_impl(__declVal()); }
__generic __intrinsic_op($(kIROp_IsUnsignedInt)) bool __isUnsignedInt_impl(T t);
__generic [__unsafeForceInlineEarly] bool __isUnsignedInt() { return __isUnsignedInt_impl(__declVal()); }
__generic __intrinsic_op($(kIROp_IsSignedInt)) bool __isSignedInt_impl(T t);
__generic [__unsafeForceInlineEarly] bool __isSignedInt() { return __isSignedInt_impl(__declVal()); }
__generic __intrinsic_op($(kIROp_IsVector)) bool __isVector_impl(T t);
__generic [__unsafeForceInlineEarly] bool __isVector() { return __isVector_impl(__declVal()); }

// Provide implementations to public generic arithmetic interfaces for builtin types.
${{{{
// Code gen integer type implementations.
for (int tt = 0; tt < kBaseTypeCount; ++tt) { if (kBaseTypes[tt].flags & (SINT_MASK | UINT_MASK)) { }}}} extension $(kBaseTypes[tt].name) : IInteger { [__unsafeForceInlineEarly] bool equals(This other){return this==other;} [__unsafeForceInlineEarly] bool lessThan(This other){return this