
source: branches/HeuristicLab.Problems.GaussianProcessTuning/HeuristicLab.Eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h @ 9562

Last change on this file since 9562 was 9562, checked in by gkronber, 11 years ago

#1967 worked on Gaussian process evolution.

File size: 15.7 KB
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H

namespace Eigen {

namespace internal {

template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;

/* Specialization for a row-major destination matrix => simple transposition of the product */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
{
  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
  static EIGEN_STRONG_INLINE void run(
    Index rows, Index cols, Index depth,
    const LhsScalar* lhs, Index lhsStride,
    const RhsScalar* rhs, Index rhsStride,
    ResScalar* res, Index resStride,
    ResScalar alpha,
    level3_blocking<RhsScalar,LhsScalar>& blocking,
    GemmParallelInfo<Index>* info = 0)
  {
    // transpose the product such that the result is column major
    general_matrix_matrix_product<Index,
      RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
      LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
      ColMajor>
    ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);
  }
};
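
// Illustrative sketch (not from the Eigen sources): the specialization above
// relies on the identity (A*B)^T = B^T * A^T, so a row-major destination is
// filled by running the column-major kernel with the operands swapped and their
// storage orders flipped. A minimal user-level example that exercises both
// destination storage orders:
//
//   #include <Eigen/Dense>
//   #include <iostream>
//
//   int main()
//   {
//     Eigen::MatrixXd A = Eigen::MatrixXd::Random(3, 4);
//     Eigen::MatrixXd B = Eigen::MatrixXd::Random(4, 2);
//
//     // Column-major destination (Eigen's default storage order).
//     Eigen::MatrixXd C1 = A * B;
//
//     // Row-major destination: the same product, dispatched to the transposing
//     // specialization above.
//     Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> C2 = A * B;
//
//     std::cout << (C1 - C2).norm() << "\n";   // expected to print ~0
//     return 0;
//   }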

/*  Specialization for a col-major destination matrix
 *    => Blocking algorithm following Goto's paper */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
{
typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static void run(Index rows, Index cols, Index depth,
  const LhsScalar* _lhs, Index lhsStride,
  const RhsScalar* _rhs, Index rhsStride,
  ResScalar* res, Index resStride,
  ResScalar alpha,
  level3_blocking<LhsScalar,RhsScalar>& blocking,
  GemmParallelInfo<Index>* info = 0)
{
  const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
  const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);

  typedef gebp_traits<LhsScalar,RhsScalar> Traits;

  Index kc = blocking.kc();                   // cache block size along the K direction
  Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M direction
  //Index nc = blocking.nc(); // cache block size along the N direction

  gemm_pack_lhs<LhsScalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
  gemm_pack_rhs<RhsScalar, Index, Traits::nr, RhsStorageOrder> pack_rhs;
  gebp_kernel<LhsScalar, RhsScalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;

#ifdef EIGEN_HAS_OPENMP
  if(info)
  {
    // this is the parallel version!
    Index tid = omp_get_thread_num();
    Index threads = omp_get_num_threads();

    std::size_t sizeA = kc*mc;
    std::size_t sizeW = kc*Traits::WorkSpaceFactor;
    ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, 0);
    ei_declare_aligned_stack_constructed_variable(RhsScalar, w, sizeW, 0);

    RhsScalar* blockB = blocking.blockB();
    eigen_internal_assert(blockB!=0);

    // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
    for(Index k=0; k<depth; k+=kc)
    {
      const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'

      // In order to reduce the chance that a thread has to wait for the other,
      // let's start by packing A'.
      pack_lhs(blockA, &lhs(0,k), lhsStride, actual_kc, mc);

      // Pack B_k to B' in a parallel fashion:
      // each thread packs the sub block B_k,j to B'_j where j is the thread id.

      // However, before copying to B'_j, we have to make sure that no other thread is still using it,
      // i.e., we test that info[tid].users equals 0.
      // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
      while(info[tid].users!=0) {}
      info[tid].users += threads;

      pack_rhs(blockB+info[tid].rhs_start*actual_kc, &rhs(k,info[tid].rhs_start), rhsStride, actual_kc, info[tid].rhs_length);

      // Notify the other threads that the part B'_j is ready to go.
      info[tid].sync = k;

      // Computes C_i += A' * B' per B'_j
      for(Index shift=0; shift<threads; ++shift)
      {
        Index j = (tid+shift)%threads;

        // At this point we have to make sure that B'_j has been updated by the thread j,
        // we use testAndSetOrdered to mimic a volatile access.
        // However, no need to wait for the B' part which has been updated by the current thread!
        if(shift>0)
          while(info[j].sync!=k) {}

        gebp(res+info[j].rhs_start*resStride, resStride, blockA, blockB+info[j].rhs_start*actual_kc, mc, actual_kc, info[j].rhs_length, alpha, -1,-1,0,0, w);
      }

      // Then keep going as usual with the remaining A'
      for(Index i=mc; i<rows; i+=mc)
      {
        const Index actual_mc = (std::min)(i+mc,rows)-i;

        // pack A_i,k to A'
        pack_lhs(blockA, &lhs(i,k), lhsStride, actual_kc, actual_mc);

        // C_i += A' * B'
        gebp(res+i, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1,-1,0,0, w);
      }

      // Release all the sub blocks B'_j of B' for the current thread,
      // i.e., we simply decrement the number of users by 1
      for(Index j=0; j<threads; ++j)
        #pragma omp atomic
        --(info[j].users);
    }
  }
  else
#endif // EIGEN_HAS_OPENMP
  {
    EIGEN_UNUSED_VARIABLE(info);

    // this is the sequential version!
    std::size_t sizeA = kc*mc;
    std::size_t sizeB = kc*cols;
    std::size_t sizeW = kc*Traits::WorkSpaceFactor;

    ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
    ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());
    ei_declare_aligned_stack_constructed_variable(RhsScalar, blockW, sizeW, blocking.blockW());

    // For each horizontal panel of the rhs, and corresponding panel of the lhs...
    // (==GEMM_VAR1)
    for(Index k2=0; k2<depth; k2+=kc)
    {
      const Index actual_kc = (std::min)(k2+kc,depth)-k2;

      // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
      // => Pack rhs's panel into a sequential chunk of memory (L2 caching)
      // Note that this panel will be read as many times as the number of blocks in the lhs's
      // vertical panel which is, in practice, a very low number.
      pack_rhs(blockB, &rhs(k2,0), rhsStride, actual_kc, cols);


      // For each mc x kc block of the lhs's vertical panel...
      // (==GEPP_VAR1)
      for(Index i2=0; i2<rows; i2+=mc)
      {
        const Index actual_mc = (std::min)(i2+mc,rows)-i2;

        // We pack the lhs's block into a sequential chunk of memory (L1 caching)
        // Note that this block will be read a very high number of times, which is equal to the number of
        // micro vertical panel of the large rhs's panel (e.g., cols/4 times).
        pack_lhs(blockA, &lhs(i2,k2), lhsStride, actual_kc, actual_mc);

        // Everything is packed, we can now call the block * panel kernel:
        gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1, -1, 0, 0, blockW);

      }
    }
  }
}

};

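// Illustrative sketch (not from the Eigen sources): the sequential branch above
// is the classic Goto-style blocking, i.e. an outer loop over kc-wide panels of
// the depth dimension and an inner loop over mc-tall blocks of the lhs, with
// packed copies (blockA/blockB) feeding the gebp micro-kernel. A deliberately
// naive, self-contained version of that loop nest, without packing or a
// vectorized micro-kernel, assuming column-major double matrices:
//
//   #include <algorithm>
//
//   // C (rows x cols) += alpha * A (rows x depth) * B (depth x cols), all column-major.
//   void blocked_gemm(int rows, int cols, int depth,
//                     const double* A, const double* B, double* C, double alpha,
//                     int mc = 256, int kc = 256)
//   {
//     for (int k2 = 0; k2 < depth; k2 += kc) {
//       int actual_kc = std::min(k2 + kc, depth) - k2;    // panel size along K
//       for (int i2 = 0; i2 < rows; i2 += mc) {
//         int actual_mc = std::min(i2 + mc, rows) - i2;   // block size along M
//         // This triple loop plays the role of the packed gebp kernel above.
//         for (int j = 0; j < cols; ++j)
//           for (int k = 0; k < actual_kc; ++k)
//             for (int i = 0; i < actual_mc; ++i)
//               C[(i2 + i) + j * rows] +=
//                 alpha * A[(i2 + i) + (k2 + k) * rows] * B[(k2 + k) + j * depth];
//       }
//     }
//   }
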
/*********************************************************************************
*  Specialization of GeneralProduct<> for "large" GEMM, i.e.,
*  implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/

template<typename Lhs, typename Rhs>
struct traits<GeneralProduct<Lhs,Rhs,GemmProduct> >
 : traits<ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs> >
{};

template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
  gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, Scalar actualAlpha,
                  BlockingType& blocking)
    : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
  {}

  void initParallelSession() const
  {
    m_blocking.allocateB();
  }

  void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
  {
    if(cols==-1)
      cols = m_rhs.cols();

    Gemm::run(rows, cols, m_lhs.cols(),
              /*(const Scalar*)*/&m_lhs.coeffRef(row,0), m_lhs.outerStride(),
              /*(const Scalar*)*/&m_rhs.coeffRef(0,col), m_rhs.outerStride(),
              (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
              m_actualAlpha, m_blocking, info);
  }

  protected:
    const Lhs& m_lhs;
    const Rhs& m_rhs;
    Dest& m_dest;
    Scalar m_actualAlpha;
    BlockingType& m_blocking;
};
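
// Illustrative sketch (not from the Eigen sources): gemm_functor exposes the
// product as a callable over a (row, rows) slice of the destination so that a
// caller such as parallelize_gemm can hand each worker a horizontal strip of the
// result. The generic splitting pattern below uses std::thread and an invented
// helper name; it is an assumption for illustration only and does not reproduce
// Eigen's actual parallel scheme (which additionally shares packed rhs panels
// through GemmParallelInfo). The functor is assumed safe to call concurrently on
// disjoint row ranges.
//
//   #include <algorithm>
//   #include <thread>
//   #include <vector>
//
//   template<typename Functor>
//   void split_rows_among_threads(const Functor& func, int rows, int threads)
//   {
//     std::vector<std::thread> pool;
//     int blockRows = (rows + threads - 1) / threads;
//     for (int t = 0; t < threads; ++t) {
//       int r0  = t * blockRows;
//       int len = std::min(blockRows, rows - r0);
//       if (len <= 0) break;
//       // Each worker computes its own horizontal slice via func(r0, len).
//       pool.emplace_back([&func, r0, len] { func(r0, len); });
//     }
//     for (std::thread& th : pool) th.join();
//   }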

template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;

template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
    typedef _LhsScalar LhsScalar;
    typedef _RhsScalar RhsScalar;

  protected:
    LhsScalar* m_blockA;
    RhsScalar* m_blockB;
    RhsScalar* m_blockW;

    DenseIndex m_mc;
    DenseIndex m_nc;
    DenseIndex m_kc;

  public:

    level3_blocking()
      : m_blockA(0), m_blockB(0), m_blockW(0), m_mc(0), m_nc(0), m_kc(0)
    {}

    inline DenseIndex mc() const { return m_mc; }
    inline DenseIndex nc() const { return m_nc; }
    inline DenseIndex kc() const { return m_kc; }

    inline LhsScalar* blockA() { return m_blockA; }
    inline RhsScalar* blockB() { return m_blockB; }
    inline RhsScalar* blockW() { return m_blockW; }
};

template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor,
      ActualRows = Transpose ? MaxCols : MaxRows,
      ActualCols = Transpose ? MaxRows : MaxCols
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;
    enum {
      SizeA = ActualRows * MaxDepth,
      SizeB = ActualCols * MaxDepth,
      SizeW = MaxDepth * Traits::WorkSpaceFactor
    };

    EIGEN_ALIGN16 LhsScalar m_staticA[SizeA];
    EIGEN_ALIGN16 RhsScalar m_staticB[SizeB];
    EIGEN_ALIGN16 RhsScalar m_staticW[SizeW];

  public:

    gemm_blocking_space(DenseIndex /*rows*/, DenseIndex /*cols*/, DenseIndex /*depth*/)
    {
      this->m_mc = ActualRows;
      this->m_nc = ActualCols;
      this->m_kc = MaxDepth;
      this->m_blockA = m_staticA;
      this->m_blockB = m_staticB;
      this->m_blockW = m_staticW;
    }

    inline void allocateA() {}
    inline void allocateB() {}
    inline void allocateW() {}
    inline void allocateAll() {}
};

template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;

    DenseIndex m_sizeA;
    DenseIndex m_sizeB;
    DenseIndex m_sizeW;

  public:

    gemm_blocking_space(DenseIndex rows, DenseIndex cols, DenseIndex depth)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc);
      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
      m_sizeW = this->m_kc*Traits::WorkSpaceFactor;
    }

    void allocateA()
    {
      if(this->m_blockA==0)
        this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
    }

    void allocateB()
    {
      if(this->m_blockB==0)
        this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
    }

    void allocateW()
    {
      if(this->m_blockW==0)
        this->m_blockW = aligned_new<RhsScalar>(m_sizeW);
    }

    void allocateAll()
    {
      allocateA();
      allocateB();
      allocateW();
    }

    ~gemm_blocking_space()
    {
      aligned_delete(this->m_blockA, m_sizeA);
      aligned_delete(this->m_blockB, m_sizeB);
      aligned_delete(this->m_blockW, m_sizeW);
    }
};
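
// Illustrative sketch (not from the Eigen sources): how the blocking classes
// above combine with general_matrix_matrix_product if the internal entry point
// is invoked directly. Normal user code should simply write
// C.noalias() += alpha * A * B; the template arguments and the direct call below
// are assumptions made for illustration, based only on the definitions in this
// file, and they bypass the parallel path (info == 0).
//
//   #include <Eigen/Dense>
//
//   void internal_gemm_sketch(const Eigen::MatrixXd& A, const Eigen::MatrixXd& B,
//                             Eigen::MatrixXd& C, double alpha)
//   {
//     using namespace Eigen;
//     typedef internal::gemm_blocking_space<ColMajor, double, double,
//                                           Dynamic, Dynamic, Dynamic> Blocking;
//
//     Blocking blocking(C.rows(), C.cols(), A.cols());
//     blocking.allocateAll();   // heap buffers blockA/blockB/blockW (dynamic sizes)
//
//     internal::general_matrix_matrix_product<DenseIndex,
//         double, ColMajor, false,    // lhs scalar, storage order, conjugation
//         double, ColMajor, false,    // rhs scalar, storage order, conjugation
//         ColMajor>                   // destination storage order
//       ::run(C.rows(), C.cols(), A.cols(),
//             A.data(), A.outerStride(),
//             B.data(), B.outerStride(),
//             C.data(), C.outerStride(),
//             alpha, blocking, 0);
//   }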

} // end namespace internal

template<typename Lhs, typename Rhs>
class GeneralProduct<Lhs, Rhs, GemmProduct>
  : public ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs>
{
    enum {
      MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
    };
  public:
    EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)

    typedef typename  Lhs::Scalar LhsScalar;
    typedef typename  Rhs::Scalar RhsScalar;
    typedef           Scalar      ResScalar;

    GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
    {
      typedef internal::scalar_product_op<LhsScalar,RhsScalar> BinOp;
      EIGEN_CHECK_BINARY_COMPATIBILIY(BinOp,LhsScalar,RhsScalar);
    }

    template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
    {
      eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());

      typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(m_lhs);
      typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(m_rhs);

      Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs)
                                 * RhsBlasTraits::extractScalarFactor(m_rhs);

      typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
              Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;

      typedef internal::gemm_functor<
        Scalar, Index,
        internal::general_matrix_matrix_product<
          Index,
          LhsScalar, (_ActualLhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
          RhsScalar, (_ActualRhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
          (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
        _ActualLhsType, _ActualRhsType, Dest, BlockingType> GemmFunctor;

      BlockingType blocking(dst.rows(), dst.cols(), lhs.cols());

      internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), this->rows(), this->cols(), Dest::Flags&RowMajorBit);
    }
};
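
// Illustrative sketch (not from the Eigen sources): from the user's point of
// view the GemmProduct specialization above is reached through an ordinary
// product of dynamically sized (or large fixed-size) matrices, and
// scaleAndAddTo() is invoked when the product expression is evaluated into the
// destination. A minimal example:
//
//   #include <Eigen/Dense>
//
//   int main()
//   {
//     Eigen::MatrixXd A = Eigen::MatrixXd::Random(512, 300);
//     Eigen::MatrixXd B = Eigen::MatrixXd::Random(300, 400);
//     Eigen::MatrixXd C = Eigen::MatrixXd::Zero(512, 400);
//
//     // Evaluates through the blocked GEMM path defined in this header;
//     // noalias() avoids the temporary a plain assignment would otherwise use.
//     C.noalias() += 2.0 * A * B;
//     return 0;
//   }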

} // end namespace Eigen

#endif // EIGEN_GENERAL_MATRIX_MATRIX_H