// Specialization of internal::traits for Block: computes the compile-time sizes,
// storage order, strides and flags of a block expression from those of XprType.
template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
struct traits<Block<XprType, BlockRows, BlockCols, InnerPanel> > : traits<XprType>
{
  typedef typename traits<XprType>::Scalar Scalar;
  typedef typename traits<XprType>::StorageKind StorageKind;
  typedef typename traits<XprType>::XprKind XprKind;
  typedef typename nested<XprType>::type XprTypeNested;
  typedef typename remove_reference<XprTypeNested>::type _XprTypeNested;
  enum {
    MatrixRows = traits<XprType>::RowsAtCompileTime,
    MatrixCols = traits<XprType>::ColsAtCompileTime,
    RowsAtCompileTime = MatrixRows == 0 ? 0 : BlockRows,
    ColsAtCompileTime = MatrixCols == 0 ? 0 : BlockCols,
    MaxRowsAtCompileTime = BlockRows==0 ? 0
                         : RowsAtCompileTime != Dynamic ? int(RowsAtCompileTime)
                         : int(traits<XprType>::MaxRowsAtCompileTime),
    MaxColsAtCompileTime = BlockCols==0 ? 0
                         : ColsAtCompileTime != Dynamic ? int(ColsAtCompileTime)
                         : int(traits<XprType>::MaxColsAtCompileTime),

    XprTypeIsRowMajor = (int(traits<XprType>::Flags) & RowMajorBit) != 0,
    IsRowMajor = (MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1) ? 1
               : (MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1) ? 0
               : XprTypeIsRowMajor,
    HasSameStorageOrderAsXprType = (IsRowMajor == XprTypeIsRowMajor),
    InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime),
    InnerStrideAtCompileTime = HasSameStorageOrderAsXprType
                             ? int(inner_stride_at_compile_time<XprType>::ret)
                             : int(outer_stride_at_compile_time<XprType>::ret),
    OuterStrideAtCompileTime = HasSameStorageOrderAsXprType
                             ? int(outer_stride_at_compile_time<XprType>::ret)
                             : int(inner_stride_at_compile_time<XprType>::ret),

    // Packet access requires a unit inner stride and an inner size that is a
    // multiple of the packet size (or unknown at compile time).
    MaskPacketAccessBit = (InnerSize == Dynamic || (InnerSize % packet_traits<Scalar>::size) == 0)
                          && (InnerStrideAtCompileTime == 1)
                        ? PacketAccessBit : 0,
    MaskAlignedBit = (InnerPanel && (OuterStrideAtCompileTime != Dynamic)
                      && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % 16) == 0))
                        ? AlignedBit : 0,
    FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1) ? LinearAccessBit : 0,
    FlagsLvalueBit = is_lvalue<XprType>::value ? LvalueBit : 0,
    FlagsRowMajorBit = IsRowMajor ? RowMajorBit : 0,
    Flags0 = traits<XprType>::Flags & ( (HereditaryBits & ~RowMajorBit) |
                                        DirectAccessBit |
                                        MaskPacketAccessBit |
                                        MaskAlignedBit),
    Flags = Flags0 | FlagsLinearAccessBit | FlagsLvalueBit | FlagsRowMajorBit
  };
};
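// Usage sketch (illustrative, not part of Block.h; assumes C++11 and the Eigen 3.2-era
// traits above): the block's compile-time sizes come straight from the BlockRows/BlockCols
// parameters, while a Dynamic parameter is bounded by the nested expression's maximum size.
//
//   #include <Eigen/Core>
//   int main()
//   {
//     typedef Eigen::Block<Eigen::MatrixXd, 2, 3> FixedBlock;  // fixed 2x3 block of a dynamic matrix
//     static_assert(int(FixedBlock::RowsAtCompileTime) == 2, "rows fixed at compile time");
//     static_assert(int(FixedBlock::ColsAtCompileTime) == 3, "cols fixed at compile time");
//
//     typedef Eigen::Block<Eigen::Matrix4f> DynBlock;          // dynamic-size block of a fixed 4x4 matrix
//     static_assert(int(DynBlock::RowsAtCompileTime) == Eigen::Dynamic, "size known only at run time");
//     static_assert(int(DynBlock::MaxRowsAtCompileTime) == 4, "but bounded by the nested matrix");
//     return 0;
//   }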
// Forward declaration of the dense implementation, dispatched on whether the
// nested expression provides direct (pointer-based) access to its coefficients.
template<typename XprType, int BlockRows = Dynamic, int BlockCols = Dynamic, bool InnerPanel = false,
         bool HasDirectAccess = internal::has_direct_access<XprType>::ret> class BlockImpl_dense;
template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, typename StorageKind>
class BlockImpl;
/** Expression of a fixed-size or dynamic-size block. */
template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel> class Block
  : public BlockImpl<XprType, BlockRows, BlockCols, InnerPanel, typename internal::traits<XprType>::StorageKind>
{
    typedef BlockImpl<XprType, BlockRows, BlockCols, InnerPanel, typename internal::traits<XprType>::StorageKind> Impl;
  public:
    typedef Impl Base;
    EIGEN_GENERIC_PUBLIC_INTERFACE(Block)
    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block)

    /** Column or Row constructor */
    inline Block(XprType& xpr, Index i) : Impl(xpr, i)
    {
      eigen_assert( (i>=0) && (
          ((BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) && i<xpr.rows())
        ||((BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) && i<xpr.cols())));
    }

    /** Fixed-size constructor */
    inline Block(XprType& xpr, Index a_startRow, Index a_startCol)
      : Impl(xpr, a_startRow, a_startCol)
    {
      EIGEN_STATIC_ASSERT(RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic, THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE)
      eigen_assert(a_startRow >= 0 && BlockRows >= 1 && a_startRow + BlockRows <= xpr.rows()
                && a_startCol >= 0 && BlockCols >= 1 && a_startCol + BlockCols <= xpr.cols());
    }

    /** Dynamic-size constructor */
    inline Block(XprType& xpr,
                 Index a_startRow, Index a_startCol,
                 Index blockRows, Index blockCols)
      : Impl(xpr, a_startRow, a_startCol, blockRows, blockCols)
    {
      eigen_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==blockRows)
                && (ColsAtCompileTime==Dynamic || ColsAtCompileTime==blockCols));
      eigen_assert(a_startRow >= 0 && blockRows >= 0 && a_startRow <= xpr.rows() - blockRows
                && a_startCol >= 0 && blockCols >= 0 && a_startCol <= xpr.cols() - blockCols);
    }
};
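// Usage sketch (illustrative, not part of Block.h): Block expressions are normally created
// through block()/row()/col() and friends on a dense expression rather than by calling the
// constructors above directly; the three constructors correspond to the row/column,
// fixed-size and dynamic-size cases.
//
//   #include <Eigen/Dense>
//   #include <iostream>
//   int main()
//   {
//     Eigen::MatrixXd m = Eigen::MatrixXd::Random(4, 4);
//     std::cout << m.block(1, 1, 2, 2) << "\n\n"; // dynamic-size: Block(xpr, startRow, startCol, rows, cols)
//     std::cout << m.block<2, 2>(1, 1) << "\n\n"; // fixed-size:   Block(xpr, startRow, startCol)
//     m.row(0).setZero();                         // row/column:   Block(xpr, i); writes through to m
//     std::cout << m << std::endl;
//     return 0;
//   }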
// The generic default implementation for dense blocks simply forwards to
// internal::BlockImpl_dense, which is specialized for direct and non-direct access.
template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
class BlockImpl<XprType, BlockRows, BlockCols, InnerPanel, Dense>
  : public internal::BlockImpl_dense<XprType, BlockRows, BlockCols, InnerPanel>
{
    typedef internal::BlockImpl_dense<XprType, BlockRows, BlockCols, InnerPanel> Impl;
    typedef typename XprType::Index Index;
  public:
    typedef Impl Base;
    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl)
    inline BlockImpl(XprType& xpr, Index i) : Impl(xpr, i) {}
    inline BlockImpl(XprType& xpr, Index a_startRow, Index a_startCol) : Impl(xpr, a_startRow, a_startCol) {}
    inline BlockImpl(XprType& xpr, Index a_startRow, Index a_startCol, Index blockRows, Index blockCols)
      : Impl(xpr, a_startRow, a_startCol, blockRows, blockCols) {}
};
/** \internal Implementation of dense Blocks in the general case (no direct access). */
template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, bool HasDirectAccess>
class BlockImpl_dense
  : public internal::dense_xpr_base<Block<XprType, BlockRows, BlockCols, InnerPanel> >::type
{
    typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;
  public:
    typedef typename internal::dense_xpr_base<BlockType>::type Base;
    EIGEN_DENSE_PUBLIC_INTERFACE(BlockType)
    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl_dense)

    class InnerIterator;

    /** Column or Row constructor */
    inline BlockImpl_dense(XprType& xpr, Index i)
      : m_xpr(xpr),
        // A row if BlockRows==1 and BlockCols==XprType::ColsAtCompileTime,
        // a column if BlockRows==XprType::RowsAtCompileTime and BlockCols==1.
        m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0),
        m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0),
        m_blockRows(BlockRows==1 ? 1 : xpr.rows()),
        m_blockCols(BlockCols==1 ? 1 : xpr.cols())
    {}
    /** Fixed-size constructor */
    inline BlockImpl_dense(XprType& xpr, Index a_startRow, Index a_startCol)
      : m_xpr(xpr), m_startRow(a_startRow), m_startCol(a_startCol),
        m_blockRows(BlockRows), m_blockCols(BlockCols)
    {}

    /** Dynamic-size constructor */
    inline BlockImpl_dense(XprType& xpr,
                           Index a_startRow, Index a_startCol,
                           Index blockRows, Index blockCols)
      : m_xpr(xpr), m_startRow(a_startRow), m_startCol(a_startCol),
        m_blockRows(blockRows), m_blockCols(blockCols)
    {}
    inline Index rows() const { return m_blockRows.value(); }
    inline Index cols() const { return m_blockCols.value(); }
    inline Scalar& coeffRef(Index rowId, Index colId)
    {
      EIGEN_STATIC_ASSERT_LVALUE(XprType)
      return m_xpr.const_cast_derived()
               .coeffRef(rowId + m_startRow.value(), colId + m_startCol.value());
    }

    inline const Scalar& coeffRef(Index rowId, Index colId) const
    {
      return m_xpr.derived()
               .coeffRef(rowId + m_startRow.value(), colId + m_startCol.value());
    }

    EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index rowId, Index colId) const
    {
      return m_xpr.coeff(rowId + m_startRow.value(), colId + m_startCol.value());
    }

    // Linear (index-based) access: valid because LinearAccessBit is only set for
    // single-row or single-column blocks, so the index maps onto one dimension.
    inline Scalar& coeffRef(Index index)
    {
      EIGEN_STATIC_ASSERT_LVALUE(XprType)
      return m_xpr.const_cast_derived()
               .coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
                         m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
    }

    inline const Scalar& coeffRef(Index index) const
    {
      return m_xpr.const_cast_derived()
               .coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
                         m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
    }

    inline const CoeffReturnType coeff(Index index) const
    {
      return m_xpr
               .coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
                      m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
    }
    // Packet access always goes through Unaligned loads/stores, since a block does
    // not, in general, start at an aligned address of the nested expression.
    template<int LoadMode>
    inline PacketScalar packet(Index rowId, Index colId) const
    {
      return m_xpr.template packet<Unaligned>
               (rowId + m_startRow.value(), colId + m_startCol.value());
    }

    template<int LoadMode>
    inline void writePacket(Index rowId, Index colId, const PacketScalar& val)
    {
      m_xpr.const_cast_derived().template writePacket<Unaligned>
               (rowId + m_startRow.value(), colId + m_startCol.value(), val);
    }

    template<int LoadMode>
    inline PacketScalar packet(Index index) const
    {
      return m_xpr.template packet<Unaligned>
               (m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
                m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
    }

    template<int LoadMode>
    inline void writePacket(Index index, const PacketScalar& val)
    {
      m_xpr.const_cast_derived().template writePacket<Unaligned>
               (m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
                m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0), val);
    }
    #ifdef EIGEN_PARSED_BY_DOXYGEN
    inline const Scalar* data() const;
    inline Index innerStride() const;
    inline Index outerStride() const;
    #endif

    const typename internal::remove_all<typename XprType::Nested>::type& nestedExpression() const
    { return m_xpr; }

    Index startRow() const { return m_startRow.value(); }
    Index startCol() const { return m_startCol.value(); }
  protected:
    const typename XprType::Nested m_xpr;
    const internal::variable_if_dynamic<Index, XprType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
    const internal::variable_if_dynamic<Index, XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
    const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_blockRows;
    const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_blockCols;
};
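// Usage sketch (illustrative, not part of Block.h): the accessors above only shift the
// requested indices by the stored offsets, so coefficient (r,c) of a block aliases
// coefficient (startRow+r, startCol+c) of the nested expression.
//
//   #include <Eigen/Dense>
//   #include <cassert>
//   int main()
//   {
//     Eigen::MatrixXd m = Eigen::MatrixXd::Random(5, 5);
//     Eigen::Block<Eigen::MatrixXd> b(m, 2, 1, 3, 2); // startRow=2, startCol=1, 3x2 block
//     assert(b(0, 0) == m(2, 1));                     // coeff() adds the offsets
//     b(1, 1) = 42.0;                                 // coeffRef() writes into m(3, 2)
//     assert(m(3, 2) == 42.0);
//     return 0;
//   }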
/** \internal Implementation of dense Blocks in the direct-access case. */
template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
class BlockImpl_dense<XprType, BlockRows, BlockCols, InnerPanel, true>
  : public MapBase<Block<XprType, BlockRows, BlockCols, InnerPanel> >
{
    typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;
  public:
    typedef MapBase<BlockType> Base;
    EIGEN_DENSE_PUBLIC_INTERFACE(BlockType)
    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl_dense)

    /** Column or Row constructor */
    inline BlockImpl_dense(XprType& xpr, Index i)
      : Base(internal::const_cast_ptr(&xpr.coeffRef(
               (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0,
               (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0)),
             BlockRows==1 ? 1 : xpr.rows(),
             BlockCols==1 ? 1 : xpr.cols()),
        m_xpr(xpr)
    {
      init();
    }
    // ...

    /** Dynamic-size constructor */
    inline BlockImpl_dense(XprType& xpr,
                           Index startRow, Index startCol,
                           Index blockRows, Index blockCols)
      : Base(internal::const_cast_ptr(&xpr.coeffRef(startRow, startCol)), blockRows, blockCols),
        m_xpr(xpr)
    {
      init();
    }
    /** \sa MapBase::innerStride() */
    inline Index innerStride() const
    {
      return internal::traits<BlockType>::HasSameStorageOrderAsXprType
             ? m_xpr.innerStride()
             : m_xpr.outerStride();
    }

    /** \sa MapBase::outerStride() */
    inline Index outerStride() const
    {
      return m_outerStride;
    }
  protected:

    #ifndef EIGEN_PARSED_BY_DOXYGEN
    /** \internal constructor from an already computed data pointer */
    inline BlockImpl_dense(XprType& xpr, const Scalar* data, Index blockRows, Index blockCols)
      : Base(data, blockRows, blockCols), m_xpr(xpr)
    {
      init();
    }
    #endif
    void init()
    {
      m_outerStride = internal::traits<BlockType>::HasSameStorageOrderAsXprType
                    ? m_xpr.outerStride()
                    : m_xpr.innerStride();
    }

    typename XprType::Nested m_xpr;
    Index m_outerStride;
};
#endif // EIGEN_BLOCK_H
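// Usage sketch (illustrative, not part of Block.h; assumes a column-major MatrixXd): a
// direct-access block maps straight into the nested matrix's storage, keeping the parent's
// outer stride while its inner stride stays 1, as computed by innerStride()/init() above.
//
//   #include <Eigen/Dense>
//   #include <cassert>
//   int main()
//   {
//     Eigen::MatrixXd m = Eigen::MatrixXd::Zero(10, 10);
//     Eigen::Block<Eigen::MatrixXd> b(m, 2, 3, 4, 5); // 4x5 block starting at (2,3)
//     assert(b.innerStride() == 1);                   // same storage order as m
//     assert(b.outerStride() == m.outerStride());     // i.e. 10, one full column of m
//     assert(&b(0, 0) == &m(2, 3));                   // points directly into m's data
//     return 0;
//   }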