/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef __OPENCV_CUDA_REDUCE_HPP__
#define __OPENCV_CUDA_REDUCE_HPP__

#include <thrust/tuple.h>
#include "detail/reduce.hpp"
#include "detail/reduce_key_val.hpp"

/** @file
 * @deprecated Use @ref cudev instead.
 */

//! @cond IGNORED

namespace cv { namespace cuda { namespace device
{
    template <int N, typename T, class Op>
    __device__ __forceinline__ void reduce(volatile T* smem, T& val, unsigned int tid, const Op& op)
    {
        reduce_detail::Dispatcher<N>::reductor::template reduce<volatile T*, T&, const Op&>(smem, val, tid, op);
    }
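
    // Usage sketch (illustrative, not part of the original header): a block-wide
    // sum over BLOCK_SIZE threads. It assumes a shared buffer of at least
    // BLOCK_SIZE elements, BLOCK_SIZE matching the launch configuration, and
    // plus<> from functional.hpp. Following the convention used by callers in
    // this module, thread 0 reads the reduced value from `val` after the call.
    //
    //     const int BLOCK_SIZE = 256;
    //     __shared__ float sbuf[BLOCK_SIZE];
    //     float val = partial;                       // per-thread partial sum
    //     reduce<BLOCK_SIZE>(sbuf, val, threadIdx.x, plus<float>());
    //     if (threadIdx.x == 0)
    //         dst[blockIdx.x] = val;                 // block total
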
    template <int N,
              typename P0, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7, typename P8, typename P9,
              typename R0, typename R1, typename R2, typename R3, typename R4, typename R5, typename R6, typename R7, typename R8, typename R9,
              class Op0, class Op1, class Op2, class Op3, class Op4, class Op5, class Op6, class Op7, class Op8, class Op9>
    __device__ __forceinline__ void reduce(const thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9>& smem,
                                           const thrust::tuple<R0, R1, R2, R3, R4, R5, R6, R7, R8, R9>& val,
                                           unsigned int tid,
                                           const thrust::tuple<Op0, Op1, Op2, Op3, Op4, Op5, Op6, Op7, Op8, Op9>& op)
    {
        reduce_detail::Dispatcher<N>::reductor::template reduce<
                const thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9>&,
                const thrust::tuple<R0, R1, R2, R3, R4, R5, R6, R7, R8, R9>&,
                const thrust::tuple<Op0, Op1, Op2, Op3, Op4, Op5, Op6, Op7, Op8, Op9>&>(smem, val, tid, op);
    }
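
    // The tuple overload above performs several independent reductions in a
    // single pass, one per tuple element; it is normally called with shared
    // buffers wrapped by smem_tuple() (see the sketch near the end of this
    // namespace).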

    template <unsigned int N, typename K, typename V, class Cmp>
    __device__ __forceinline__ void reduceKeyVal(volatile K* skeys, K& key, volatile V* svals, V& val, unsigned int tid, const Cmp& cmp)
    {
        reduce_key_val_detail::Dispatcher<N>::reductor::template reduce<volatile K*, K&, volatile V*, V&, const Cmp&>(skeys, key, svals, val, tid, cmp);
    }
    template <unsigned int N,
              typename K,
              typename VP0, typename VP1, typename VP2, typename VP3, typename VP4, typename VP5, typename VP6, typename VP7, typename VP8, typename VP9,
              typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9,
              class Cmp>
    __device__ __forceinline__ void reduceKeyVal(volatile K* skeys, K& key,
                                                 const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>& svals,
                                                 const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& val,
                                                 unsigned int tid, const Cmp& cmp)
    {
        reduce_key_val_detail::Dispatcher<N>::reductor::template reduce<volatile K*, K&,
                const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>&,
                const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>&,
                const Cmp&>(skeys, key, svals, val, tid, cmp);
    }
    template <unsigned int N,
              typename KP0, typename KP1, typename KP2, typename KP3, typename KP4, typename KP5, typename KP6, typename KP7, typename KP8, typename KP9,
              typename KR0, typename KR1, typename KR2, typename KR3, typename KR4, typename KR5, typename KR6, typename KR7, typename KR8, typename KR9,
              typename VP0, typename VP1, typename VP2, typename VP3, typename VP4, typename VP5, typename VP6, typename VP7, typename VP8, typename VP9,
              typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9,
              class Cmp0, class Cmp1, class Cmp2, class Cmp3, class Cmp4, class Cmp5, class Cmp6, class Cmp7, class Cmp8, class Cmp9>
    __device__ __forceinline__ void reduceKeyVal(const thrust::tuple<KP0, KP1, KP2, KP3, KP4, KP5, KP6, KP7, KP8, KP9>& skeys,
                                                 const thrust::tuple<KR0, KR1, KR2, KR3, KR4, KR5, KR6, KR7, KR8, KR9>& key,
                                                 const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>& svals,
                                                 const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& val,
                                                 unsigned int tid,
                                                 const thrust::tuple<Cmp0, Cmp1, Cmp2, Cmp3, Cmp4, Cmp5, Cmp6, Cmp7, Cmp8, Cmp9>& cmp)
    {
        reduce_key_val_detail::Dispatcher<N>::reductor::template reduce<
                const thrust::tuple<KP0, KP1, KP2, KP3, KP4, KP5, KP6, KP7, KP8, KP9>&,
                const thrust::tuple<KR0, KR1, KR2, KR3, KR4, KR5, KR6, KR7, KR8, KR9>&,
                const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>&,
                const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>&,
                const thrust::tuple<Cmp0, Cmp1, Cmp2, Cmp3, Cmp4, Cmp5, Cmp6, Cmp7, Cmp8, Cmp9>&
                >(skeys, key, svals, val, tid, cmp);
    }
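
    // Usage sketch (illustrative): a block-wide arg-min. Each thread contributes
    // a candidate key (e.g. a distance) and an associated value (e.g. its index);
    // the comparator picks the winning key. It assumes shared buffers of at least
    // BLOCK_SIZE elements and less<> from functional.hpp; as with reduce(),
    // thread 0 reads the winning key/value pair after the call.
    //
    //     const int BLOCK_SIZE = 256;
    //     __shared__ float skeys[BLOCK_SIZE];
    //     __shared__ int   svals[BLOCK_SIZE];
    //     float key = dist;                          // per-thread candidate
    //     int   val = idx;
    //     reduceKeyVal<BLOCK_SIZE>(skeys, key, svals, val, threadIdx.x, less<float>());
    //     if (threadIdx.x == 0) { bestDist[blockIdx.x] = key; bestIdx[blockIdx.x] = val; }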

    // smem_tuple
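    //
    // The smem_tuple() overloads below wrap 1 to 10 raw shared-memory pointers
    // into a thrust::tuple of volatile-qualified pointers, the form expected by
    // the tuple-based reduce() / reduceKeyVal() overloads above.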

    template <typename T0>
    __device__ __forceinline__
    thrust::tuple<volatile T0*>
    smem_tuple(T0* t0)
    {
        return thrust::make_tuple((volatile T0*) t0);
    }

    template <typename T0, typename T1>
    __device__ __forceinline__
    thrust::tuple<volatile T0*, volatile T1*>
    smem_tuple(T0* t0, T1* t1)
    {
        return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1);
    }

    template <typename T0, typename T1, typename T2>
    __device__ __forceinline__
    thrust::tuple<volatile T0*, volatile T1*, volatile T2*>
    smem_tuple(T0* t0, T1* t1, T2* t2)
    {
        return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2);
    }

    template <typename T0, typename T1, typename T2, typename T3>
    __device__ __forceinline__
    thrust::tuple<volatile T0*, volatile T1*, volatile T2*, volatile T3*>
    smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3)
    {
        return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3);
    }

    template <typename T0, typename T1, typename T2, typename T3, typename T4>
    __device__ __forceinline__
    thrust::tuple<volatile T0*, volatile T1*, volatile T2*, volatile T3*, volatile T4*>
    smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4)
    {
        return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4);
    }

    template <typename T0, typename T1, typename T2, typename T3, typename T4, typename T5>
    __device__ __forceinline__
    thrust::tuple<volatile T0*, volatile T1*, volatile T2*, volatile T3*, volatile T4*, volatile T5*>
    smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4, T5* t5)
    {
        return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4, (volatile T5*) t5);
    }

    template <typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6>
    __device__ __forceinline__
    thrust::tuple<volatile T0*, volatile T1*, volatile T2*, volatile T3*, volatile T4*, volatile T5*, volatile T6*>
    smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4, T5* t5, T6* t6)
    {
        return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4, (volatile T5*) t5, (volatile T6*) t6);
    }

    template <typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7>
    __device__ __forceinline__
    thrust::tuple<volatile T0*, volatile T1*, volatile T2*, volatile T3*, volatile T4*, volatile T5*, volatile T6*, volatile T7*>
    smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4, T5* t5, T6* t6, T7* t7)
    {
        return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4, (volatile T5*) t5, (volatile T6*) t6, (volatile T7*) t7);
    }

    template <typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8>
    __device__ __forceinline__
    thrust::tuple<volatile T0*, volatile T1*, volatile T2*, volatile T3*, volatile T4*, volatile T5*, volatile T6*, volatile T7*, volatile T8*>
    smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4, T5* t5, T6* t6, T7* t7, T8* t8)
    {
        return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4, (volatile T5*) t5, (volatile T6*) t6, (volatile T7*) t7, (volatile T8*) t8);
    }

    template <typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9>
    __device__ __forceinline__
    thrust::tuple<volatile T0*, volatile T1*, volatile T2*, volatile T3*, volatile T4*, volatile T5*, volatile T6*, volatile T7*, volatile T8*, volatile T9*>
    smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4, T5* t5, T6* t6, T7* t7, T8* t8, T9* t9)
    {
        return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4, (volatile T5*) t5, (volatile T6*) t6, (volatile T7*) t7, (volatile T8*) t8, (volatile T9*) t9);
    }
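
    // Usage sketch (illustrative): reducing two quantities in one pass with the
    // tuple-based reduce() overload. The shared buffers are combined with
    // smem_tuple(), while the per-thread values and the per-element operators
    // are passed as thrust tuples of matching arity; plus<> is assumed to come
    // from functional.hpp.
    //
    //     const int BLOCK_SIZE = 256;
    //     __shared__ float s_sum[BLOCK_SIZE];
    //     __shared__ int   s_cnt[BLOCK_SIZE];
    //     float sum = partialSum;
    //     int   cnt = partialCount;
    //     reduce<BLOCK_SIZE>(smem_tuple(s_sum, s_cnt),
    //                        thrust::tie(sum, cnt),
    //                        threadIdx.x,
    //                        thrust::make_tuple(plus<float>(), plus<int>()));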
}}}

//! @endcond

#endif // __OPENCV_CUDA_REDUCE_HPP__