// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2013 Desire Nuentsa <desire.nuentsa_wakam@inria.fr> // Copyright (C) 2013 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
namespace Eigen { /** \ingroup SparseCore_Module * * \class BlockSparseMatrix * * \brief A versatile sparse matrix representation where each element is a block * * This class provides routines to manipulate block sparse matrices stored in a * BSR-like representation. There are two main types : * * 1. All blocks have the same number of rows and columns, called block size * in the following. In this case, if this block size is known at compile time, * it can be given as a template parameter like * \code * BlockSparseMatrix<Scalar, 3, ColMajor> bmat(b_rows, b_cols); * \endcode * Here, bmat is a b_rows x b_cols block sparse matrix * where each coefficient is a 3x3 dense matrix. * If the block size is fixed but will be given at runtime, * \code * BlockSparseMatrix<Scalar, Dynamic, ColMajor> bmat(b_rows, b_cols); * bmat.setBlockSize(block_size); * \endcode * * 2. The second case is for variable-block sparse matrices. * Here each block has its own dimensions. The only restriction is that all the blocks * in a row (resp. a column) should have the same number of rows (resp. of columns). * It is thus required in this case to describe the layout of the matrix by calling * setBlockLayout(rowBlocks, colBlocks). * * In any of the previous case, the matrix can be filled by calling setFromTriplets(). * A regular sparse matrix can be converted to a block sparse matrix and vice versa. * It is obviously required to describe the block layout beforehand by calling either * setBlockSize() for fixed-size blocks or setBlockLayout for variable-size blocks. * * \tparam _Scalar The Scalar type * \tparam _BlockAtCompileTime The block layout option. It takes the following values * Dynamic : block size known at runtime * a numeric number : fixed-size block known at compile time
*/ template<typename _Scalar, int _BlockAtCompileTime=Dynamic, int _Options=ColMajor, typename _StorageIndex=int> class BlockSparseMatrix;
template<typename BlockSparseMatrixT> class BlockSparseMatrixView;
/** \brief Copy-constructor.
  *
  * Copies the scalar sizing fields from \a other. Note that m_blockPtr is
  * initialized to 0 here rather than copied.
  *
  * NOTE(review): only the block-size assertion is visible here; the deep copies
  * of the internal arrays (m_values, m_indices, m_outerIndex, ...) do not appear
  * in this chunk — the body seems to have been truncated; confirm against the
  * upstream Eigen sources.
  */
BlockSparseMatrix(const BlockSparseMatrix& other)
: m_innerBSize(other.m_innerBSize),m_outerBSize(other.m_outerBSize),
m_nonzerosblocks(other.m_nonzerosblocks),m_nonzeros(other.m_nonzeros),
m_blockPtr(0),m_blockSize(other.m_blockSize)
{
  // should we allow copying between variable-size blocks and fixed-size blocks ??
  // For now, mixing fixed-size and variable-size layouts is rejected at runtime.
  eigen_assert(m_blockSize == BlockSize && " CAN NOT COPY BETWEEN FIXED-SIZE AND VARIABLE-SIZE BLOCKS");
/** \brief Assignment from a sparse matrix with the same storage order.
  *
  * Converts a scalar sparse matrix into this block sparse matrix: a first pass
  * over \a spmat records which blocks contain at least one nonzero (building a
  * boolean block pattern), then setBlockStructure() allocates the internal
  * arrays, and a second pass scatters every scalar value into its block slot.
  * Explicit zeros inside a nonzero block are stored.
  *
  * \warning Before calling this function, it is necessary to call
  * either setBlockLayout() (matrices with variable-size blocks)
  * or setBlockSize() (for fixed-size blocks).
  *
  * \param spmat source sparse matrix; must use the same storage order
  * \return *this
  */
template<typename MatrixType>
inline BlockSparseMatrix& operator=(const MatrixType& spmat)
{
  eigen_assert((m_innerBSize != 0 && m_outerBSize != 0)
  && "Trying to assign to a zero-size matrix, call resize() first");
  eigen_assert(((MatrixType::Options&RowMajorBit) != IsColMajor) && "Wrong storage order");
  // Boolean matrix holding one entry per nonzero *block*.
  typedef SparseMatrix<bool,MatrixType::Options,typename MatrixType::Index> MatrixPatternType;
  MatrixPatternType blockPattern(blockRows(), blockCols());
  m_nonzeros = 0;
  // First pass: compute the number of nonzero blocks and their locations.
  for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
  {
    // Browse each outer block and compute the structure
    std::vector<bool> nzblocksFlag(m_innerBSize,false); // Marks blocks already recorded in this outer block
    blockPattern.startVec(bj);
    // Scan every scalar outer vector belonging to block column/row bj.
    for(StorageIndex j = blockOuterIndex(bj); j < blockOuterIndex(bj+1); ++j)
    {
      typename MatrixType::InnerIterator it_spmat(spmat, j);
      for(; it_spmat; ++it_spmat)
      {
        StorageIndex bi = innerToBlock(it_spmat.index()); // Index of the current nonzero block
        if(!nzblocksFlag[bi])
        {
          // First scalar seen in block (bi,bj): record the block once.
          nzblocksFlag[bi] = true;
          blockPattern.insertBackByOuterInnerUnordered(bj, bi) = true;
          // Accumulate the total number of stored scalars
          // (including explicit zeros inside blocks).
          m_nonzeros += blockOuterSize(bj) * blockInnerSize(bi);
        }
      }
    } // end current outer block
  }
  blockPattern.finalize();
  // Allocate the internal arrays from the discovered block pattern.
  setBlockStructure(blockPattern);
  // Zero-fill so that untouched slots (explicit zeros of a block) are defined.
  for(StorageIndex nz = 0; nz < m_nonzeros; ++nz) m_values[nz] = Scalar(0);
  // Second pass: copy the values.
  for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
  {
    for(StorageIndex j = blockOuterIndex(bj); j < blockOuterIndex(bj+1); ++j)
    {
      // Browse the outer block column by column (for column-major matrices)
      typename MatrixType::InnerIterator it_spmat(spmat, j);
      for(; it_spmat; ++it_spmat)
      {
        StorageIndex idx = 0; // Position of this block within outer block bj
        StorageIndex bi = innerToBlock(it_spmat.index()); // Index of the current nonzero block
        // Advance to the inner block this element belongs to.
        // Linear scan — cheap because m_indices is sorted per outer block.
        while(bi > m_indices[m_outerIndex[bj]+idx]) ++idx;
        StorageIndex idxVal; // Final position of this scalar in m_values
        if(m_blockSize == Dynamic)
        {
          // Offset of the block itself (start of block in m_values) ...
          idxVal = m_blockPtr[m_outerIndex[bj]+idx];
          // ... plus the column-major offset inside the block.
          idxVal += (j - blockOuterIndex(bj)) * blockOuterSize(bj) + it_spmat.index() - m_innerOffset[bi];
        }
        else
        {
          // Fixed-size blocks: every block occupies m_blockSize^2 scalars.
          idxVal = (m_outerIndex[bj] + idx) * m_blockSize * m_blockSize;
          // Offset inside the block.
          idxVal += (j - blockOuterIndex(bj)) * m_blockSize + (it_spmat.index()%m_blockSize);
        }
        // Insert the value
        m_values[idxVal] = it_spmat.value();
      } // end of this column
    } // end of this block
  } // end of this outer block
  return *this;
}
/** \brief Set the nonzero block pattern of the matrix.
  *
  * Given a sparse matrix describing the nonzero block pattern,
  * this function prepares the internal pointers for values.
  * After calling this function, any *nonzero* block (bi, bj) can be set
  * with a simple call to coeffRef(bi,bj).
  *
  * \warning Before calling this function, it is necessary to call
  * either setBlockLayout() (matrices with variable-size blocks)
  * or setBlockSize() (for fixed-size blocks).
  *
  * \param blockPattern Sparse matrix of boolean elements describing the block structure
  *
  * \sa setBlockLayout() \sa setBlockSize()
  */
template<typename MatrixType>
void setBlockStructure(const MatrixType& blockPattern)
{
  resize(blockPattern.rows(), blockPattern.cols());
  reserve(blockPattern.nonZeros());

  // Browse the block pattern and set up the various pointers
  m_outerIndex[0] = 0;
  if(m_blockSize == Dynamic) m_blockPtr[0] = 0;
  for(StorageIndex nz = 0; nz < m_nonzeros; ++nz) m_values[nz] = Scalar(0);
  for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
  {
    // Browse each outer block.
    // First, collect and sort the inner indices of its nonzero blocks.
    // FIXME : find a way to avoid this temporary vector ...
    std::vector<int> nzBlockIdx;
    for(typename MatrixType::InnerIterator it(blockPattern, bj); it; ++it)
      nzBlockIdx.push_back(it.index());
    std::sort(nzBlockIdx.begin(), nzBlockIdx.end());

    // Now, fill block indices and (for variable-size blocks) pointers to blocks.
    for(std::size_t idx = 0; idx < nzBlockIdx.size(); ++idx)
    {
      StorageIndex offset = m_outerIndex[bj] + static_cast<StorageIndex>(idx); // offset in m_indices
      m_indices[offset] = nzBlockIdx[idx];
      if(m_blockSize == Dynamic)
        // Cumulative sum: m_blockPtr[k] is the start of block k in m_values.
        // (The previous form wrote m_blockPtr[offset] from m_blockPtr[offset-1],
        // which read out of bounds for offset==0 and stored the block END.)
        m_blockPtr[offset+1] = m_blockPtr[offset] + blockInnerSize(nzBlockIdx[idx]) * blockOuterSize(bj);
      // There is no blockPtr for fixed-size blocks: positions are implicit.
    }
    // Save the pointer to the next outer block
    m_outerIndex[bj+1] = m_outerIndex[bj] + nzBlockIdx.size();
  }
}
/** * \brief Set the number of rows and columns blocks
*/ inlinevoid resize(Index brow, Index bcol)
{
m_innerBSize = IsColMajor ? brow : bcol;
m_outerBSize = IsColMajor ? bcol : brow;
}
/** * \brief set the block size at runtime for fixed-size block layout * * Call this only for fixed-size blocks
*/ inlinevoid setBlockSize(Index blockSize)
{
m_blockSize = blockSize;
}
/** * \brief Set the row and column block layouts, * * This function set the size of each row and column block. * So this function should be used only for blocks with variable size. * \param rowBlocks : Number of rows per row block * \param colBlocks : Number of columns per column block * \sa resize(), setBlockSize()
*/ inlinevoid setBlockLayout(const VectorXi& rowBlocks, const VectorXi& colBlocks)
{ const VectorXi& innerBlocks = IsColMajor ? rowBlocks : colBlocks; const VectorXi& outerBlocks = IsColMajor ? colBlocks : rowBlocks;
eigen_assert(m_innerBSize == innerBlocks.size() && "CHECK THE NUMBER OF ROW OR COLUMN BLOCKS");
eigen_assert(m_outerBSize == outerBlocks.size() && "CHECK THE NUMBER OF ROW OR COLUMN BLOCKS");
m_outerBSize = outerBlocks.size(); // starting index of blocks... cumulative sums
m_innerOffset = new StorageIndex[m_innerBSize+1];
m_outerOffset = new StorageIndex[m_outerBSize+1];
m_innerOffset[0] = 0;
m_outerOffset[0] = 0;
std::partial_sum(&innerBlocks[0], &innerBlocks[m_innerBSize-1]+1, &m_innerOffset[1]);
std::partial_sum(&outerBlocks[0], &outerBlocks[m_outerBSize-1]+1, &m_outerOffset[1]);
// Compute the total number of nonzeros
m_nonzeros = 0; for(StorageIndex bj = 0; bj < m_outerBSize; ++bj) for(StorageIndex bi = 0; bi < m_innerBSize; ++bi)
m_nonzeros += outerBlocks[bj] * innerBlocks[bi];
}
/** * \brief Allocate the internal array of pointers to blocks and their inner indices * * \note For fixed-size blocks, call setBlockSize() to set the block. * And For variable-size blocks, call setBlockLayout() before using this function * * \param nonzerosblocks Number of nonzero blocks. The total number of nonzeros is * is computed in setBlockLayout() for variable-size blocks * \sa setBlockSize()
*/ inlinevoid reserve(const Index nonzerosblocks)
{
eigen_assert((m_innerBSize != 0 && m_outerBSize != 0) && "TRYING TO RESERVE ZERO-SIZE MATRICES, CALL resize() first");
//FIXME Should free if already allocated
m_outerIndex = new StorageIndex[m_outerBSize+1];
m_nonzerosblocks = nonzerosblocks; if(m_blockSize != Dynamic)
{
m_nonzeros = nonzerosblocks * (m_blockSize * m_blockSize);
m_blockPtr = 0;
} else
{ // m_nonzeros is already computed in setBlockLayout()
m_blockPtr = new StorageIndex[m_nonzerosblocks+1];
}
m_indices = new StorageIndex[m_nonzerosblocks+1];
m_values = new Scalar[m_nonzeros];
}
/** \brief Fill values in a matrix from a triplet list.
  *
  * Each triplet item has a block stored in an Eigen dense matrix.
  * The InputIterator class should provide the functions row(), col() and value()
  *
  * \note For fixed-size blocks, call setBlockSize() before this function.
  *
  * FIXME Duplicates are not rejected.
  *
  * NOTE(review): the visible body only sorts the triplets, sets up counting
  * vectors and validates each triplet; the code that actually fills
  * m_outerIndex/m_indices/m_values does not appear in this chunk — it seems to
  * have been truncated (note the missing closing brace); confirm against the
  * upstream Eigen sources.
  */
template<typename InputIterator>
void setFromTriplets(const InputIterator& begin, const InputIterator& end)
{
  eigen_assert((m_innerBSize!=0 && m_outerBSize !=0) && "ZERO BLOCKS, PLEASE CALL resize() before");
  /* First, sort the triplet list
   * FIXME This can be unnecessarily expensive since only the inner indices have to be sorted
   * The best approach is like in SparseMatrix::setFromTriplets()
   */
  internal::TripletComp<InputIterator, IsColMajor> tripletcomp;
  std::sort(begin, end, tripletcomp);
  /* Count the number of rows and column blocks,
   * and the number of nonzero blocks per outer dimension
   */
  VectorXi rowBlocks(m_innerBSize); // Size of each block row
  VectorXi colBlocks(m_outerBSize); // Size of each block column
  rowBlocks.setZero(); colBlocks.setZero();
  VectorXi nzblock_outer(m_outerBSize); // Number of nz blocks per outer vector
  VectorXi nz_outer(m_outerBSize); // Number of nz per outer vector...for variable-size blocks
  nzblock_outer.setZero();
  nz_outer.setZero();
  for(InputIterator it(begin); it !=end; ++it)
  {
    // Each triplet must address a valid block and, for fixed-size layouts,
    // carry a square block of the declared size.
    eigen_assert(it->row() >= 0 && it->row() < this->blockRows() && it->col() >= 0 && it->col() < this->blockCols());
    eigen_assert((it->value().rows() == it->value().cols() && (it->value().rows() == m_blockSize))
    || (m_blockSize == Dynamic));
    // An alternative when the outer indices are sorted...no need to use an array of markers
    // for(Index bcol = 0; bcol < m_outerBSize; ++bcol)
    // {
    //   Index id = 0, id_nz = 0, id_nzblock = 0;
    //   for(InputIterator it(begin); it!=end; ++it)
    //   {
    //     while (id<bcol) // one pass should do the job unless there are empty columns
    //     {
    //       id++;
    //       m_outerIndex[id+1]=m_outerIndex[id];
    //     }
    //     m_outerIndex[id+1] += 1;
    //     m_indices[id_nzblock]=brow;
    //     Index block_size = it->value().rows()*it->value().cols();
    //     m_blockPtr[id_nzblock+1] = m_blockPtr[id_nzblock] + block_size;
    //     id_nzblock++;
    //     memcpy(&(m_values[id_nz]),it->value().data(), block_size*sizeof(Scalar));
    //     id_nz += block_size;
    //   }
    //   while(id < m_outerBSize-1) // Empty columns at the end
    //   {
    //     id++;
    //     m_outerIndex[id+1]=m_outerIndex[id];
    //   }
    // }
  }
/** \returns the total number of scalar rows */
inline Index rows() const
{
  //return blockRows();
  return IsColMajor ? innerSize() : outerSize();
}
/** \returns the total number of scalar columns */
inline Index cols() const
{
  //return blockCols();
  return IsColMajor ? outerSize() : innerSize();
}
/** \returns the outer dimension in scalar units (columns if column-major,
  * rows if row-major). */
inline Index outerSize() const
{
  // For variable-size blocks the last cumulative offset is the scalar size.
  if(m_blockSize == Dynamic) return m_outerOffset[m_outerBSize];
  else return m_outerBSize * m_blockSize;
}
/** \returns the number of rows grouped by blocks */
inline Index blockRows() const
{
  return IsColMajor ? m_innerBSize : m_outerBSize;
}
/** \returns the number of columns grouped by blocks */
inline Index blockCols() const
{
  return IsColMajor ? m_outerBSize : m_innerBSize;
}
/** \returns the number of outer blocks */
inline Index outerBlocks() const { return m_outerBSize; }
/** \returns the number of inner blocks */
inline Index innerBlocks() const { return m_innerBSize; }
/** \returns the block index where outer belongs to */
inline Index outerToBlock(Index outer) const
{
  eigen_assert(outer < outerSize() && "OUTER INDEX OUT OF BOUNDS");
  // Linear scan over the cumulative outer offsets.
  // NOTE(review): m_outerOffset is only allocated by setBlockLayout(); for a
  // fixed-size layout this dereferences an unset pointer — presumably a
  // fixed-size shortcut (outer / m_blockSize) is missing here; confirm against
  // the upstream Eigen sources.
  StorageIndex b_outer = 0;
  while(m_outerOffset[b_outer] <= outer) ++b_outer;
  return b_outer - 1;
}
/** \returns the block index where inner belongs to */
inline Index innerToBlock(Index inner) const
{
  // NOTE(review): the message says "OUTER" but the check is on the inner index
  // (string literal left untouched).
  eigen_assert(inner < innerSize() && "OUTER INDEX OUT OF BOUNDS");
  // NOTE(review): the remainder of this function (lookup and return) does not
  // appear in this chunk — it seems to have been truncated.
/** \returns a reference to the (i,j) block as an Eigen Dense Matrix */
Ref<BlockScalar> coeffRef(Index brow, Index bcol)
{
  eigen_assert(brow < blockRows() && "BLOCK ROW INDEX OUT OF BOUNDS");
  // NOTE(review): the assert message below is garbled ("nzblocksFlag" spliced
  // into "BLOCK COLUMN OUT OF BOUNDS") — likely extraction damage; left as-is
  // because a doc-only pass must not alter string literals.
  eigen_assert(bcol < blockCols() && "BLOCK nzblocksFlagCOLUMN OUT OF BOUNDS");
  // NOTE(review): the body that locates the block and builds the Ref is not
  // visible in this chunk — it seems to have been truncated.
/** \returns the value of the (i,j) block as an Eigen Dense Matrix */
Map<const BlockScalar> coeff(Index brow, Index bcol) const
{
  eigen_assert(brow < blockRows() && "BLOCK ROW INDEX OUT OF BOUNDS");
  eigen_assert(bcol < blockCols() && "BLOCK COLUMN OUT OF BOUNDS");
  // NOTE(review): the body that locates the block and builds the Map is not
  // visible in this chunk — it seems to have been truncated.
/** \returns the number of nonzero blocks */ inline Index nonZerosBlocks() const { return m_nonzerosblocks; } /** \returns the total number of nonzero elements, including eventual explicit zeros in blocks */ inline Index nonZeros() const { return m_nonzeros; }
/** \brief for compatibility purposes with the SparseMatrix class */ inlinebool isCompressed() const {returntrue;} /** * \returns the starting index of the bi row block
*/ inline Index blockRowsIndex(Index bi) const
{ return IsColMajor ? blockInnerIndex(bi) : blockOuterIndex(bi);
}
/** * \returns the starting index of the bj col block
*/ inline Index blockColsIndex(Index bj) const
{ return IsColMajor ? blockOuterIndex(bj) : blockInnerIndex(bj);
}
/** \returns the starting position of the block \p id in the array of values */
Index blockPtr(Index id) const
{
  if(m_blockSize == Dynamic) return m_blockPtr[id];
  // Fixed-size blocks: positions are implicit, blockSize^2 scalars per block.
  else return id * m_blockSize * m_blockSize;
  //return blockDynIdx(id, typename internal::conditional<(BlockSize==Dynamic), internal::true_type, internal::false_type>::type());
}
// To be implemented:
// insert a block at a particular location... need to make room for it first.
Map<BlockScalar> insert(Index brow, Index bcol);

Index m_innerBSize; // Number of inner blocks (block rows if column-major)
Index m_outerBSize; // Number of outer blocks (block columns if column-major)
StorageIndex *m_innerOffset; // Starting scalar index of each inner block (size m_innerBSize+1); allocated by setBlockLayout() only
StorageIndex *m_outerOffset; // Starting scalar index of each outer block (size m_outerBSize+1); allocated by setBlockLayout() only
Index m_nonzerosblocks; // Total number of nonzero blocks (at most m_innerBSize x m_outerBSize)
Index m_nonzeros; // Total number of stored scalars, including explicit zeros inside blocks
Scalar *m_values; // Values stored block after block in outer order (size m_nonzeros)
StorageIndex *m_blockPtr; // Start of each block in m_values (allocated with size m_nonzerosblocks+1 in reserve()); null for fixed-size blocks
StorageIndex *m_indices; // Inner block indices (allocated with size m_nonzerosblocks+1 in reserve())
StorageIndex *m_outerIndex; // Start of each outer block in m_indices (allocated with size m_outerBSize+1 in reserve())
Index m_blockSize; // Size of a block for fixed-size blocks, otherwise Dynamic (-1)
};
template<typename _Scalar, int _BlockAtCompileTime, int _Options, typename _StorageIndex> class BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _StorageIndex>::BlockInnerIterator
{ public:
The information on this web page was compiled carefully and to the best of our knowledge.
However, neither completeness, nor correctness, nor quality of the provided information is guaranteed.
Note:
The colored syntax highlighting is still experimental.