//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
//
#pragma once

#include "GPUMatrix.h"
#include "CPUSparseMatrix.h"
#include <functional> // for std::function, used by PrepareBuffer below

namespace Microsoft { namespace MSR { namespace CNTK {

// GPU sparse matrix, using the cuSPARSE library.
// By default we assume the CSR representation.
// NOTE: m_elemSizeAllocated (in the base matrix) is the number of non-zero elements we have allocated space for.
// We pack the CSR format (pointed to by m_pArray) as follows:
//     ElemType elements[m_elemSizeAllocated]
//     int colIdx[m_elemSizeAllocated]
//     int rowIdxStart[m_numRows+1]
template <class ElemType>
class MATH_API GPUSparseMatrix : public BaseMatrix<ElemType>
{
public:
    typedef BaseMatrix<ElemType> Base;
    using Base::m_numRows;
    using Base::m_numCols;
    using Base::m_pArray;
    using Base::m_elemSizeAllocated;
    using Base::m_sliceViewOffset;
    using Base::m_nz;
    using Base::m_format;
    using Base::m_computeDevice;
    using Base::m_externalBuffer;
    using Base::OwnBuffer;
    using Base::GetFormat;
    using Base::SetFormat;
    using Base::GetNumRows;
    using Base::GetNumCols;
    using Base::SetComputeDeviceId;
    using Base::SetNzCount;
    using Base::Clear;
    // without these using-declarations, base members would have to be accessed with this-> in GCC

public:
    using Base::IsEmpty;

    GPUSparseMatrix(const size_t numRows, const size_t numCols, const size_t numNZ, DEVICEID_TYPE computeDevice, const MatrixFormat matrixFormat = MatrixFormat::matrixFormatSparseCSR);
    explicit GPUSparseMatrix(DEVICEID_TYPE computeDevice, const MatrixFormat matrixFormat = MatrixFormat::matrixFormatSparseCSR);
    GPUSparseMatrix(const GPUSparseMatrix<ElemType>&);
    GPUSparseMatrix(const GPUMatrix<ElemType>&, const MatrixFormat matrixFormat = MatrixFormat::matrixFormatSparseCSR);
    // #ifndef __unix__
    GPUSparseMatrix(GPUSparseMatrix<ElemType>&&);
    // #endif /* LINUX */
    ~GPUSparseMatrix();

public:
    void Reset();

public:
    // Return the pointer to the non-zero values.
    // The in-memory layout is always in the following order:
    // non-zero data elements, full (major) index locations, compressed (secondary) index locations.
    // In CSR the row data is compressed; in CSC the column data is compressed.
    // Special note: the matrix may be a read-only column slice view of another
    // matrix (only supported for the CSC format today), hence the NzValues pointer
    // needs to be offset accordingly.
    inline const ElemType* NzValues() const
    {
        return m_format != matrixFormatSparseCSC ? m_pArray : m_pArray + SecondaryIndexValueAt(0);
    }
    inline ElemType* NzValues()
    {
        return m_format != matrixFormatSparseCSC ? m_pArray : m_pArray + SecondaryIndexValueAt(0);
    }
    inline size_t NzSize() const { return sizeof(ElemType) * m_nz; } // actual number of element bytes in use
    inline size_t GetNumNZElements() const { return m_nz; }

    GPUSPARSE_INDEX_TYPE* MajorIndexLocation() const // row/col ids in CSC/CSR format, blockId2col/blockId2row in BlockCol/BlockRow format
    {
        return (GPUSPARSE_INDEX_TYPE*) (m_pArray + m_elemSizeAllocated);
    }

    GPUSPARSE_INDEX_TYPE* MajorIndexLocationWithSliceViewOffset() const
    {
        return (MajorIndexLocation() + (m_format == matrixFormatSparseCSC ? SecondaryIndexValueAt(0) : 0));
    }
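    // A concrete layout example (illustrative values, not part of the API): a 2x3 CSC
    // matrix with m_elemSizeAllocated == 4 and 3 actual non-zeros packs its single
    // buffer as
    //     ElemType             nzValues[4];   // values, starting at m_pArray
    //     GPUSPARSE_INDEX_TYPE rowIdx[4];     // major index: row id of each value
    //     GPUSPARSE_INDEX_TYPE colStart[3+1]; // secondary (compressed) index
    // so MajorIndexLocation() is (GPUSPARSE_INDEX_TYPE*)(m_pArray + 4), and the
    // secondary index follows the major index within the same allocation.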
    // TODO: Comment these methods more thoroughly, e.g., why they use numNZ instead of m_elemSizeAllocated.
    size_t MajorIndexCount() const
    {
        return MajorIndexCount(m_numRows, m_numCols, m_nz, m_format);
    }
    size_t MajorIndexCount(const size_t numRows, const size_t numCols, const size_t numNZ, const MatrixFormat format) const
    {
        if (format == matrixFormatSparseBlockCol)
            return numCols;
        else if (format == matrixFormatSparseBlockRow)
            return numRows;
        else
            return numNZ;
    }
    size_t MajorIndexSize() const // actual number of major index bytes in use
    {
        return sizeof(GPUSPARSE_INDEX_TYPE) * MajorIndexCount();
    }

    GPUSPARSE_INDEX_TYPE* SecondaryIndexLocation() const // compressed index, col/row in CSC/CSR format, col2blockId/row2blockId in BlockCol/BlockRow format
    {
        if (m_format == matrixFormatSparseBlockCol)
            return MajorIndexLocation() + m_numCols;
        else if (m_format == matrixFormatSparseBlockRow)
            return MajorIndexLocation() + m_numRows;
        else
            return MajorIndexLocation() + m_elemSizeAllocated + m_sliceViewOffset;
    }
    size_t SecondaryIndexCount(const size_t numRows, const size_t numCols, const size_t numNZReserved, const MatrixFormat format) const
    {
        if (format == matrixFormatSparseBlockCol)
            return numCols;
        else if (format == matrixFormatSparseBlockRow)
            return numRows;
        else if (format == matrixFormatSparseCSC)
            return numCols + 1;
        else if (format == matrixFormatSparseCSR)
            return numRows + 1;
        else
            return numNZReserved; // COO format
    }
    size_t SecondaryIndexCount() const
    {
        return SecondaryIndexCount(m_numRows, m_numCols, m_elemSizeAllocated, m_format);
    }
    // get the size of the compressed index
    size_t SecondaryIndexSize() const
    {
        return SecondaryIndexCount() * sizeof(GPUSPARSE_INDEX_TYPE);
    }

    size_t BufferSizeNeeded(const size_t numRows, const size_t numCols, const size_t numNZ, const MatrixFormat format) const
    {
        return sizeof(ElemType) * numNZ +
               sizeof(GPUSPARSE_INDEX_TYPE) * (MajorIndexCount(numRows, numCols, numNZ, format) + SecondaryIndexCount(numRows, numCols, numNZ, format));
    }

    inline size_t BufferSizeAllocated() const { return m_totalBufferSizeAllocated; }
    inline ElemType* BufferPointer() const { return m_pArray; }
    inline size_t GetNumElemAllocated() const { return m_elemSizeAllocated; }
    inline size_t GetSizeElemAllocated() const { return sizeof(ElemType) * m_elemSizeAllocated; }

    // the column and row locations swap depending on the format we are in; the full index always follows the data array
    GPUSPARSE_INDEX_TYPE* RowLocation() const
    {
        // not a valid function for other formats
        assert(m_format == matrixFormatSparseCSC || m_format == matrixFormatSparseCSR);
        return (m_format & matrixFormatRowMajor) ? SecondaryIndexLocation() : MajorIndexLocation();
    }
    size_t RowSize() const // actual number of bytes in use
    {
        // not a valid function for other formats
        assert(m_format == matrixFormatSparseCSC || m_format == matrixFormatSparseCSR);
        return (m_format & matrixFormatRowMajor) ? SecondaryIndexSize() : MajorIndexSize();
    }
    GPUSPARSE_INDEX_TYPE* ColLocation() const
    {
        // not a valid function for other formats
        assert(m_format == matrixFormatSparseCSC || m_format == matrixFormatSparseCSR);
        return (m_format & matrixFormatRowMajor) ? MajorIndexLocation() : SecondaryIndexLocation();
    }
    size_t ColSize() const // actual number of bytes in use
    {
        // not a valid function for other formats
        assert(m_format == matrixFormatSparseCSC || m_format == matrixFormatSparseCSR);
        return (m_format & matrixFormatRowMajor) ? MajorIndexSize() : SecondaryIndexSize();
    }
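    // Illustrative buffer-size arithmetic (made-up sizes, assuming a 4-byte
    // GPUSPARSE_INDEX_TYPE): for a 2x3 CSC matrix of float with numNZ == 3,
    //     BufferSizeNeeded(2, 3, 3, matrixFormatSparseCSC)
    //         = sizeof(float) * 3                            // non-zero values
    //         + sizeof(GPUSPARSE_INDEX_TYPE) * (3 + (3 + 1)) // major + secondary index
    //         = 12 + 28 = 40 bytes.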
    GPUSPARSE_INDEX_TYPE SecondaryIndexValueAt(size_t idx) const;
    GPUSPARSE_INDEX_TYPE* BlockId2ColOrRow() const
    {
        // not a valid function for other formats
        assert(m_format == matrixFormatSparseBlockCol || m_format == matrixFormatSparseBlockRow);
        return MajorIndexLocation();
    }
    GPUSPARSE_INDEX_TYPE* ColOrRow2BlockId() const
    {
        // not a valid function for other formats
        assert(m_format == matrixFormatSparseBlockCol || m_format == matrixFormatSparseBlockRow);
        return SecondaryIndexLocation();
    }

    void SetValue(const GPUSparseMatrix<ElemType>& deepCopyFrom);
    void SetValue(const CPUSparseMatrix<ElemType>& deepCopyFrom);
    void SetValue(const GPUMatrix<ElemType>& denseMatrix, const MatrixFormat matrixFormat);
    void SetValue(const GPUMatrix<ElemType>& denseMatrix);

    void Reshape(const size_t numRows, const size_t numCols);
    void ResizeAsAndCopyIndexFrom(const GPUSparseMatrix<ElemType>& a, const bool growOnly = true);
    void Resize(const size_t numRows, const size_t numCols, const size_t numNZElemToReserve, const MatrixFormat matrixFormat, const bool growOnly = true, bool keepExistingValues = true); // matrix format will affect the size to allocate
    void Resize(const size_t numRows, const size_t numCols, const size_t numNZElemToReserve = 10000, const bool growOnly = true, bool keepExistingValues = false);

    GPUSparseMatrix<ElemType> Transpose() const;
    void InplaceTranspose();
    GPUSparseMatrix<ElemType>& AssignTransposeOf(const GPUSparseMatrix<ElemType>& a);

    GPUSparseMatrix<ElemType> ColumnSlice(size_t startColumn, size_t numCols) const;
    GPUMatrix<ElemType> CopyColumnSliceToDense(size_t startColumn, size_t numCols) const;

    GPUMatrix<ElemType> DiagonalToDense() const;

    GPUMatrix<ElemType> CopyToDenseMatrix() const;
    void CopyToDenseMatrix(GPUMatrix<ElemType>& denseMatrix) const;
    void CopyToCPUSparseMatrix(CPUSparseMatrix<ElemType>& cpuSparseMatrix) const;
    void ChangeDeviceTo(DEVICEID_TYPE toId);

    GPUSparseMatrix<ElemType>& operator=(const GPUSparseMatrix<ElemType>& deepCopy);
    // #ifndef __unix__
    GPUSparseMatrix<ElemType>& operator=(GPUSparseMatrix<ElemType>&& moveFrom);
    // #endif /* LINUX */
    GPUSparseMatrix<ElemType> operator+(const GPUSparseMatrix<ElemType>& a) const;
    GPUSparseMatrix<ElemType> operator-(const GPUSparseMatrix<ElemType>& a) const;
    GPUSparseMatrix<ElemType>& operator^=(const ElemType alpha);     // element-wise power
    GPUSparseMatrix<ElemType> operator^(const ElemType alpha) const; // element-wise power
    GPUSparseMatrix<ElemType>& operator*=(const ElemType alpha);
    GPUSparseMatrix<ElemType> operator*(const ElemType alpha) const;
    GPUSparseMatrix<ElemType>& AssignElementPowerOf(const GPUSparseMatrix<ElemType>& a, const ElemType power);

    bool IsEqualTo(const GPUSparseMatrix<ElemType>& a, const ElemType threshold = 1e-8) const;
    bool IsEqualTo(const GPUMatrix<ElemType>& a, const ElemType threshold = 1e-8) const;

public:
    virtual DEVICEID_TYPE GetComputeDeviceId(void) const;

    // Sets the sparse matrix from CSR format; this acts as a deep copy
    void SetMatrixFromCSRFormat(const CPUSPARSE_INDEX_TYPE* h_CSRRow, const CPUSPARSE_INDEX_TYPE* h_Col, const ElemType* h_Val,
                                const size_t nz, const size_t numRows, const size_t numCols, const bool IsOnDevice = false, const DEVICEID_TYPE devId = -1);
    void SetMatrixFromCSCFormat(const CPUSPARSE_INDEX_TYPE* h_CSCCol, const CPUSPARSE_INDEX_TYPE* h_Row, const ElemType* h_Val,
                                const size_t nz, const size_t numRows, const size_t numCols, const bool IsOnDevice = false, const DEVICEID_TYPE devId = -1);
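    // A minimal usage sketch (illustrative only; host-side arrays, so the default
    // IsOnDevice = false applies, and device id 0 is an assumption):
    //     // 2x3 matrix [[1 0 2], [0 3 0]] in CSR form, 3 non-zeros
    //     CPUSPARSE_INDEX_TYPE rowStart[] = { 0, 2, 3 };
    //     CPUSPARSE_INDEX_TYPE colIdx[]   = { 0, 2, 1 };
    //     float val[]                     = { 1, 2, 3 };
    //     GPUSparseMatrix<float> m(/*computeDevice=*/0, matrixFormatSparseCSR);
    //     m.SetMatrixFromCSRFormat(rowStart, colIdx, val, /*nz=*/3, /*numRows=*/2, /*numCols=*/3);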
    // Gets the sparse matrix in CSR format; this acts as a deep copy. All passed pointers must be NULL; the function allocates the memory itself.
    void GetMatrixFromCSRFormat(CPUSPARSE_INDEX_TYPE*& h_CSRRow, CPUSPARSE_INDEX_TYPE*& h_Col, ElemType*& h_Val, size_t& numElemAllocated, size_t& nz, size_t& numRows, size_t& numCols) const;

    void GetMatrixFromCSCFormat(CPUSPARSE_INDEX_TYPE*& h_CSCCol, CPUSPARSE_INDEX_TYPE*& h_Row, ElemType*& h_Val, size_t& numElemAllocated, size_t& nz, size_t& numRows, size_t& numCols) const;

    void ConvertToSparseFormat(MatrixFormat newFormat);
    void ConvertToSparseFormat(MatrixFormat newFormat, GPUSparseMatrix<ElemType>& outMatrix) const;

    bool IsValid() const;

public:
    GPUSparseMatrix<ElemType>& ElementInverse();
    GPUSparseMatrix<ElemType>& AssignElementInverseOf(const GPUSparseMatrix<ElemType>& a);

    GPUSparseMatrix<ElemType>& InplaceLinearRectifierDerivative();
    GPUSparseMatrix<ElemType>& AssignLinearRectifierDerivativeOf(const GPUSparseMatrix<ElemType>& a);

    GPUSparseMatrix<ElemType>& InplaceSigmoid();
    GPUSparseMatrix<ElemType>& AssignSigmoidOf(const GPUSparseMatrix<ElemType>& a);

    GPUSparseMatrix<ElemType>& InplaceTanh();
    GPUSparseMatrix<ElemType>& AssignTanhOf(const GPUSparseMatrix<ElemType>& a);

    GPUSparseMatrix<ElemType>& InplaceSqrt();
    GPUSparseMatrix<ElemType>& AssignSqrtOf(const GPUSparseMatrix<ElemType>& a);

    GPUSparseMatrix<ElemType>& InplaceExp();
    GPUSparseMatrix<ElemType>& AssignExpOf(const GPUSparseMatrix<ElemType>& a);

    GPUSparseMatrix<ElemType>& InplaceLog();
    GPUSparseMatrix<ElemType>& AssignLogOf(const GPUSparseMatrix<ElemType>& a);

    GPUSparseMatrix<ElemType>& InplaceAbs();
    GPUSparseMatrix<ElemType>& AssignAbsOf(const GPUSparseMatrix<ElemType>& a);

    GPUSparseMatrix<ElemType>& InplaceTruncate(const ElemType threshold);
    GPUSparseMatrix<ElemType>& InplaceSoftThreshold(const ElemType threshold);

    GPUSparseMatrix<ElemType>& InplaceTruncateBottom(const ElemType threshold);
    GPUSparseMatrix<ElemType>& AssignTruncateBottomOf(const GPUSparseMatrix<ElemType>& a, const ElemType threshold);
    GPUSparseMatrix<ElemType>& InplaceTruncateTop(const ElemType threshold);
    GPUSparseMatrix<ElemType>& AssignTruncateTopOf(const GPUSparseMatrix<ElemType>& a, const ElemType threshold);

    GPUSparseMatrix<ElemType>& SetToZeroIfAbsLessThan(const ElemType threshold);
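    // For orientation (a reading of the names; the implementation is authoritative):
    // the thresholding ops above act elementwise on the stored non-zero values, e.g.
    //     InplaceTruncate(t):      clamp each value into [-t, t]
    //     InplaceSoftThreshold(t): x -> sign(x) * max(|x| - t, 0)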
    ElemType SumOfElements() const;    // sum of all elements
    ElemType SumOfAbsElements() const; // sum of all abs(elements)
    ElemType FrobeniusNorm() const;
    ElemType MatrixNormInf() const;
    ElemType MatrixNorm1() const;
    ElemType MatrixNorm0() const { return (ElemType) GetNumNZElements(); }

public:
    // Performs C = alpha * op(S) * D + beta * C, where S is sparse and D and C are dense
    static void MultiplyAndWeightedAdd(ElemType alpha, const GPUMatrix<ElemType>& a, const bool transposeA, const GPUSparseMatrix<ElemType>& b, const bool transposeB, ElemType beta, GPUMatrix<ElemType>& c);
    static void MultiplyAndWeightedAdd(ElemType alpha, const GPUSparseMatrix<ElemType>& S, const bool transposeS, const GPUMatrix<ElemType>& D, const bool transposeD, ElemType beta, GPUMatrix<ElemType>& C);
    static void MultiplyAndAdd(ElemType alpha, const GPUMatrix<ElemType>& lhs, const bool transposeA, const GPUSparseMatrix<ElemType>& rhs, const bool transposeB, GPUSparseMatrix<ElemType>& c);
    static void ScaleAndAdd(const ElemType alpha, const GPUSparseMatrix<ElemType>& lhs, GPUMatrix<ElemType>& c);
    static void ConvolveAndWeightedAdd(ElemType alpha, const GPUMatrix<ElemType>& lhs, const bool transposeA, const GPUSparseMatrix<ElemType>& rhs, const bool transposeB, ElemType beta, GPUMatrix<ElemType>& c,
                                       size_t numChannels, size_t horizontalSubsample, bool padding, bool channelwise);
    static void TensorShuffleScaleAndAdd(ElemType keepWeight, const GPUSparseMatrix<ElemType>& a, size_t D, size_t S, size_t M, size_t K, size_t T, ElemType scaleFactor, const GPUSparseMatrix<ElemType>& b, GPUSparseMatrix<ElemType>& c);

    void NormalGrad(GPUMatrix<ElemType>& c, const ElemType momentum);
    ElemType Adagrad(GPUMatrix<ElemType>& c, const bool needAveMultiplier);

    static void Multiply(const GPUSparseMatrix<ElemType>& S, const GPUMatrix<ElemType>& D, GPUMatrix<ElemType>& C);
    static void Multiply(const GPUMatrix<ElemType>& D, const GPUSparseMatrix<ElemType>& S, GPUMatrix<ElemType>& C);
    static void Multiply(const GPUSparseMatrix<ElemType>& S1, bool transposeS1, const GPUSparseMatrix<ElemType>& S2, bool transposeS2, GPUSparseMatrix<ElemType>& C);
    GPUSparseMatrix<ElemType>& AssignProductOf(const GPUSparseMatrix<ElemType>& a, const bool transposeA, const GPUSparseMatrix<ElemType>& b, const bool transposeB);

    static ElemType InnerProductOfMatrices(const GPUSparseMatrix<ElemType>& a, const GPUMatrix<ElemType>& b);
    static ElemType InnerProductOfMatrices(const GPUMatrix<ElemType>& a, const GPUSparseMatrix<ElemType>& b);

    static void ScaleAndAdd(ElemType alpha, const GPUSparseMatrix<ElemType>& a, ElemType beta, const GPUSparseMatrix<ElemType>& b, GPUSparseMatrix<ElemType>& c);
    static void ScaleAndAdd(ElemType alpha, const GPUSparseMatrix<ElemType>& a, ElemType beta, const GPUMatrix<ElemType>& b, GPUMatrix<ElemType>& c);
    static void ScaleAndAdd(ElemType alpha, const GPUMatrix<ElemType>& a, ElemType beta, const GPUSparseMatrix<ElemType>& b, GPUMatrix<ElemType>& c);

    static void Scale(ElemType alpha, GPUSparseMatrix<ElemType>& a);
    static void ElementWisePower(ElemType alpha, const GPUSparseMatrix<ElemType>& a, GPUSparseMatrix<ElemType>& c);

    static bool AreEqual(const GPUSparseMatrix<ElemType>& a, const GPUSparseMatrix<ElemType>& b, const ElemType threshold = 1e-8);
    static bool AreEqual(const GPUSparseMatrix<ElemType>& a, const GPUMatrix<ElemType>& b, const ElemType threshold = 1e-8);
    static bool AreEqual(const GPUMatrix<ElemType>& a, const GPUSparseMatrix<ElemType>& b, const ElemType threshold = 1e-8);

    // For these two, I should also add a version which would return a GPUSparseMatrix, since Dense .* Sparse = Sparse .* Dense = Sparse
    static GPUMatrix<ElemType> ElementProductOf(const GPUSparseMatrix<ElemType>& a, const GPUMatrix<ElemType>& b);
    static GPUMatrix<ElemType> ElementProductOf(const GPUMatrix<ElemType>& a, const GPUSparseMatrix<ElemType>& b);
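    // A minimal usage sketch for the sparse-times-dense product above (illustrative;
    // device id 0 and the GPUMatrix(rows, cols, deviceId) constructor shape are assumptions):
    //     GPUSparseMatrix<float> S(0, matrixFormatSparseCSR); // 2x3, filled via SetMatrixFromCSRFormat
    //     GPUMatrix<float> D(3, 4, 0);                        // dense right-hand side
    //     GPUMatrix<float> C(2, 4, 0);                        // dense result
    //     // C = 1.0 * S * D + 0.0 * C
    //     GPUSparseMatrix<float>::MultiplyAndWeightedAdd(1.0f, S, false, D, false, 0.0f, C);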
public:
    // See: http://stackoverflow.com/questions/4660123/overloading-friend-operator-for-template-class/4661372#4661372
    template <class ElemTypeDummy>
    friend MATH_API File& operator>>(File& stream, GPUSparseMatrix<ElemTypeDummy>& us);
    template <class ElemTypeDummy>
    friend MATH_API File& operator<<(File& stream, const GPUSparseMatrix<ElemTypeDummy>& us);

private:
    void* ReserveTempHostBuffer(const size_t sizeInByte) const;
    template <class OutType, class InType>
    static void CopyBuffer(OutType* outBuffer, const InType* inBuffer, const size_t size);

private:
    void ZeroInit(const MatrixFormat matrixFormat, const DEVICEID_TYPE deviceId);

private:
    void performElementWiseFunction(const ElementWiseOperator kind, const GPUSparseMatrix<ElemType>& src);
    void DeepCopy(const GPUSparseMatrix<ElemType>& deepCopyFrom);
    void ReleaseMemory();
    void PrepareBuffer(const size_t numRows, const size_t numCols, const bool canReuseBuffer, std::function<size_t(GPUSPARSE_INDEX_TYPE* csrRowPtrC)> func);

    size_t ElemCountFromBufferSize(const size_t numRows, const size_t numCols, const MatrixFormat format, const size_t totalBufferSize) const;
    size_t ElemCountFromBufferSize() const;
    DEVICEID_TYPE PrepareDevice(const DEVICEID_TYPE deviceId = -1) const;
    size_t IdentifyRowsWithValues() const;

private:
    size_t m_totalBufferSizeAllocated;

    // used by the blockCol and blockRow formats
    size_t m_blockSize;                      // block size
    mutable GPUSPARSE_INDEX_TYPE* m_rowToId; // the id showing the order in which each row number is observed in the nnz values

    mutable void* m_tempHostBuffer; // used to copy values
    mutable size_t m_tempHostBufferSize;

    GPUSparseMatrix<ElemType>* m_sliceOf; // if this is a slice, this points to the owning matrix object that we sliced from
};

}}}