/*******************************************************************************
* Copyright 2002-2018 Intel Corporation
* All Rights Reserved.
*
* If this  software was obtained  under the  Intel Simplified  Software License,
* the following terms apply:
*
* The source code,  information  and material  ("Material") contained  herein is
* owned by Intel Corporation or its  suppliers or licensors,  and  title to such
* Material remains with Intel  Corporation or its  suppliers or  licensors.  The
* Material  contains  proprietary  information  of  Intel or  its suppliers  and
* licensors.  The Material is protected by  worldwide copyright  laws and treaty
* provisions.  No part  of  the  Material   may  be  used,  copied,  reproduced,
* modified, published,  uploaded, posted, transmitted,  distributed or disclosed
* in any way without Intel's prior express written permission.  No license under
* any patent,  copyright or other  intellectual property rights  in the Material
* is granted to  or  conferred  upon  you,  either   expressly,  by implication,
* inducement,  estoppel  or  otherwise.  Any  license   under such  intellectual
* property rights must be express and approved by Intel in writing.
*
* Unless otherwise agreed by Intel in writing,  you may not remove or alter this
* notice or  any  other  notice   embedded  in  Materials  by  Intel  or Intel's
* suppliers or licensors in any way.
*
*
* If this  software  was obtained  under the  Apache License,  Version  2.0 (the
* "License"), the following terms apply:
*
* You may  not use this  file except  in compliance  with  the License.  You may
* obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
*
*
* Unless  required  by   applicable  law  or  agreed  to  in  writing,  software
* distributed under the License  is distributed  on an  "AS IS"  BASIS,  WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the   License  for the   specific  language   governing   permissions  and
* limitations under the License.
*******************************************************************************/
/******* FILE MODIFIED FROM ORIGINAL 2019u1 RELEASE TO AVOID WARNINGS *********/
/*
//
//  Purpose:
//     Cryptography Primitive.
//     Internal Definitions of Block Cipher Tools
//
//
*/

#if !defined(_CP_TOOL_H)
#define _CP_TOOL_H

#include "pcpmask_ct.h"

#define _NEW_COPY16_
#define _NEW_XOR16_

/* copy data block */
__INLINE void CopyBlock(const void* pSrc, void* pDst, cpSize numBytes)
{
   const Ipp8u* s  = (Ipp8u*)pSrc;
   Ipp8u* d  = (Ipp8u*)pDst;
   cpSize k;
   for(k=0; k<numBytes; k++ )
      d[k] = s[k];
}
__INLINE void CopyBlock8(const void* pSrc, void* pDst)
{
   int k;
   for(k=0; k<8; k++ )
      ((Ipp8u*)pDst)[k] = ((Ipp8u*)pSrc)[k];
}

#if defined(_NEW_COPY16_)
__INLINE void CopyBlock16(const void* pSrc, void* pDst)
{
#if (_IPP_ARCH ==_IPP_ARCH_EM64T)
   ((Ipp64u*)pDst)[0] = ((Ipp64u*)pSrc)[0];
   ((Ipp64u*)pDst)[1] = ((Ipp64u*)pSrc)[1];
#else
   ((Ipp32u*)pDst)[0] = ((Ipp32u*)pSrc)[0];
   ((Ipp32u*)pDst)[1] = ((Ipp32u*)pSrc)[1];
   ((Ipp32u*)pDst)[2] = ((Ipp32u*)pSrc)[2];
   ((Ipp32u*)pDst)[3] = ((Ipp32u*)pSrc)[3];
#endif
}
#else
__INLINE void CopyBlock16(const void* pSrc, void* pDst)
{
   int k;
   for(k=0; k<16; k++ )
      ((Ipp8u*)pDst)[k] = ((Ipp8u*)pSrc)[k];
}
#endif

__INLINE void CopyBlock24(const void* pSrc, void* pDst)
{
   int k;
   for(k=0; k<24; k++ )
      ((Ipp8u*)pDst)[k] = ((Ipp8u*)pSrc)[k];
}
__INLINE void CopyBlock32(const void* pSrc, void* pDst)
{
   int k;
   for(k=0; k<32; k++ )
      ((Ipp8u*)pDst)[k] = ((Ipp8u*)pSrc)[k];
}

/*
// padding data block
*/
__INLINE void PaddBlock(Ipp8u paddingByte, void* pDst, cpSize numBytes)
{
   Ipp8u* d  = (Ipp8u*)pDst;
   cpSize k;
   for(k=0; k<numBytes; k++ )
      d[k] = paddingByte;
}

/* purge (zeroize) data block */
#if !((_IPP>=_IPP_W7) || (_IPP32E>=_IPP32E_M7))
__INLINE void PurgeBlock(void* pDst, int len)
{
   int n;
   for(n=0; n<len; n++) ((Ipp8u*)pDst)[n] = 0;
}
#else
#define PurgeBlock OWNAPI(PurgeBlock)
void PurgeBlock(void* pDst, int len);
#endif

/* fill block */
__INLINE void FillBlock16(Ipp8u filler, const void* pSrc, void* pDst, int len)
{
   int n;
   for(n=0; n<len; n++) ((Ipp8u*)pDst)[n] = ((Ipp8u*)pSrc)[n];
   for(; n<16; n++) ((Ipp8u*)pDst)[n] = filler;
}

/* xor block */
__INLINE void XorBlock(const void* pSrc1, const void* pSrc2, void* pDst, int len)
{
   const Ipp8u* p1 = (const Ipp8u*)pSrc1;
   const Ipp8u* p2 = (const Ipp8u*)pSrc2;
   Ipp8u* d  = (Ipp8u*)pDst;
   int k;
   for(k=0; k<len; k++)
      d[k] = (Ipp8u)(p1[k] ^p2[k]);
}
__INLINE void XorBlock8(const void* pSrc1, const void* pSrc2, void* pDst)
{
   const Ipp8u* p1 = (const Ipp8u*)pSrc1;
   const Ipp8u* p2 = (const Ipp8u*)pSrc2;
   Ipp8u* d  = (Ipp8u*)pDst;
   int k;
   for(k=0; k<8; k++ )
      d[k] = (Ipp8u)(p1[k] ^p2[k]);
}

#if defined(_NEW_XOR16_)
__INLINE void XorBlock16(const void* pSrc1, const void* pSrc2, void* pDst)
{
#if (_IPP_ARCH ==_IPP_ARCH_EM64T)
   ((Ipp64u*)pDst)[0] = ((Ipp64u*)pSrc1)[0] ^ ((Ipp64u*)pSrc2)[0];
   ((Ipp64u*)pDst)[1] = ((Ipp64u*)pSrc1)[1] ^ ((Ipp64u*)pSrc2)[1];
#else
   ((Ipp32u*)pDst)[0] = ((Ipp32u*)pSrc1)[0] ^ ((Ipp32u*)pSrc2)[0];
   ((Ipp32u*)pDst)[1] = ((Ipp32u*)pSrc1)[1] ^ ((Ipp32u*)pSrc2)[1];
   ((Ipp32u*)pDst)[2] = ((Ipp32u*)pSrc1)[2] ^ ((Ipp32u*)pSrc2)[2];
   ((Ipp32u*)pDst)[3] = ((Ipp32u*)pSrc1)[3] ^ ((Ipp32u*)pSrc2)[3];
#endif
}
#else
__INLINE void XorBlock16(const void* pSrc1, const void* pSrc2, void* pDst)
{
   const Ipp8u* p1 = (const Ipp8u*)pSrc1;
   const Ipp8u* p2 = (const Ipp8u*)pSrc2;
   Ipp8u* d  = (Ipp8u*)pDst;
   int k;
   for(k=0; k<16; k++ )
      d[k] = (Ipp8u)(p1[k] ^p2[k]);
}
#endif

__INLINE void XorBlock24(const void* pSrc1, const void* pSrc2, void* pDst)
{
   const Ipp8u* p1 = (const Ipp8u*)pSrc1;
   const Ipp8u* p2 = (const Ipp8u*)pSrc2;
   Ipp8u* d  = (Ipp8u*)pDst;
   int k;
   for(k=0; k<24; k++ )
      d[k] = (Ipp8u)(p1[k] ^p2[k]);
}
__INLINE void XorBlock32(const void* pSrc1, const void* pSrc2, void* pDst)
{
   const Ipp8u* p1 = (const Ipp8u*)pSrc1;
   const Ipp8u* p2 = (const Ipp8u*)pSrc2;
   Ipp8u* d  = (Ipp8u*)pDst;
   int k;
   for(k=0; k<32; k++ )
      d[k] = (Ipp8u)(p1[k] ^p2[k]);
}
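
/*
// Illustrative usage sketch (hypothetical helper, not part of the library API;
// kept disabled like the other reference code in this file): a CBC-style
// chaining step XORs a plaintext block with the previous ciphertext block
// before the result is fed to the underlying block cipher.
*/
#if 0
static void cbcChainExample(const Ipp8u pPlain[16], const Ipp8u pPrevCipher[16], Ipp8u pTmp[16])
{
   XorBlock16(pPlain, pPrevCipher, pTmp);   /* pTmp = pPlain ^ pPrevCipher */
   /* pTmp would then be encrypted into the next ciphertext block */
}
#endif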


/* compare (equivalence); note: the loop exits at the first mismatch, so the comparison is not constant-time */
__INLINE int EquBlock(const void* pSrc1, const void* pSrc2, int len)
{
   const Ipp8u* p1 = (const Ipp8u*)pSrc1;
   const Ipp8u* p2 = (const Ipp8u*)pSrc2;
   int k;
   int isEqu;
   for(k=0, isEqu=1; k<len && isEqu; k++)
      isEqu = (p1[k] == p2[k]);
   return isEqu;
}


/* addition functions for CTR mode of different block ciphers */
#if 0
__INLINE void StdIncrement(Ipp8u* pCounter, int blkSize, int numSize)
{
   int maskPosition = (blkSize-numSize)/8;
   Ipp8u mask = (Ipp8u)( 0xFF >> (blkSize-numSize)%8 );

   /* save critical byte */
   Ipp8u save  = (Ipp8u)( pCounter[maskPosition] & ~mask );

   int len = BITS2WORD8_SIZE(blkSize);
   Ipp32u carry = 1;
   for(; (len>maskPosition) && carry; len--) {
      Ipp32u x = pCounter[len-1] + carry;
      pCounter[len-1] = (Ipp8u)x;
      carry = (x>>8) & 0xFF;
   }

   /* update critical byte */
   pCounter[maskPosition] &= mask;
   pCounter[maskPosition] |= save;
}
#endif

/* constant-execution-time version:
// increments the low numSize bits of a big-endian counter block of blkBitSize bits;
// every byte of the block is touched so the memory access pattern does not depend on numSize
*/
__INLINE void StdIncrement(Ipp8u* pCounter, int blkBitSize, int numSize)
{
   int maskPosition = (blkBitSize -numSize)/8;
   Ipp8u maskVal = (Ipp8u)( 0xFF >> (blkBitSize -numSize)%8 );

   int i;
   Ipp32u carry = 1;
   for(i=BITS2WORD8_SIZE(blkBitSize)-1; i>=0; i--) {
      int d = maskPosition - i;
      /* mask is all-ones for bytes inside the counter portion, partial at the boundary byte, zero above it */
      Ipp8u mask = maskVal | (Ipp8u)cpIsMsb_ct(d);

      Ipp32u x = pCounter[i] + carry;
      Ipp8u y = pCounter[i];
      pCounter[i] = (Ipp8u)((y & ~mask) | (x & mask));

      maskVal &= cpIsMsb_ct(d);

      carry = (x>>8) & 0x1;
   }
}
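
/*
// Illustrative usage sketch (hypothetical helper, not part of the library API;
// kept disabled like the other reference code in this file): advance the low
// 32 counter bits of a 128-bit big-endian CTR block by one.
*/
#if 0
static void ctrIncrementExample(void)
{
   Ipp8u ctr[16] = {0};          /* 128-bit counter block, big-endian */
   StdIncrement(ctr, 128, 32);   /* low 32 bits (ctr[12..15]) now hold 1 */
}
#endif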

/* vb */
/* advance the low ctrNumBitSize bits of a 64-bit big-endian counter by n blocks */
__INLINE void ompStdIncrement64( void* pInitCtrVal, void* pCurrCtrVal,
                                int ctrNumBitSize, int n )
{
    int    k;
    Ipp64u cntr;
    Ipp64u temp;
    Ipp64s item;

  #if( IPP_ENDIAN == IPP_LITTLE_ENDIAN )
    for( k = 0; k < 8; k++ )
        ( ( Ipp8u* )&cntr )[k] = ( ( Ipp8u* )pInitCtrVal )[7 - k];
  #else
    for( k = 0; k < 8; k++ )
        ( ( Ipp8u* )&cntr )[k] = ( ( Ipp8u* )pInitCtrVal )[k];
  #endif

    if( ctrNumBitSize == 64 )
    {
        cntr += ( Ipp64u )n;
    }
    else
    {
        Ipp64u mask = CONST_64(0xFFFFFFFFFFFFFFFF) >> ( 64 - ctrNumBitSize );
        Ipp64u save = cntr & ( ~mask );
        Ipp64u bndr = ( Ipp64u )1 << ctrNumBitSize;

        temp = cntr & mask;
        cntr = temp + ( Ipp64u )n;

        if( cntr > bndr )
        {
            item = ( Ipp64s )n - ( Ipp64s )( bndr - temp );

            while( item > 0 )
            {
                cntr  = ( Ipp64u )item;
                item -= ( Ipp64s )bndr;
            }
        }

        cntr = save | ( cntr & mask );
    }

  #if( IPP_ENDIAN == IPP_LITTLE_ENDIAN )
    for( k = 0; k < 8; k++ )
        ( ( Ipp8u* )pCurrCtrVal )[7 - k] = ( ( Ipp8u* )&cntr )[k];
  #else
    for( k = 0; k < 8; k++ )
        ( ( Ipp8u* )pCurrCtrVal )[k] = ( ( Ipp8u* )&cntr )[k];
  #endif
}


/* vb */
/* advance the low ctrNumBitSize bits of a 128-bit big-endian counter by n blocks */
__INLINE void ompStdIncrement128( void* pInitCtrVal, void* pCurrCtrVal,
                                 int ctrNumBitSize, int n )
{
    int    k;
    Ipp64u low;
    Ipp64u hgh;
    Ipp64u flag;
    Ipp64u mask = CONST_64(0xFFFFFFFFFFFFFFFF);
    Ipp64u save;

  #if( IPP_ENDIAN == IPP_LITTLE_ENDIAN )
    for( k = 0; k < 8; k++ )
    {
        ( ( Ipp8u* )&low )[k] = ( ( Ipp8u* )pInitCtrVal )[15 - k];
        ( ( Ipp8u* )&hgh )[k] = ( ( Ipp8u* )pInitCtrVal )[7 - k];
    }
  #else
    for( k = 0; k < 8; k++ )
    {
        ( ( Ipp8u* )&low )[k] = ( ( Ipp8u* )pInitCtrVal )[8 + k];
        ( ( Ipp8u* )&hgh )[k] = ( ( Ipp8u* )pInitCtrVal )[k];
    }
  #endif

    if( ctrNumBitSize == 64 )
    {
        low += ( Ipp64u )n;
    }
    else if( ctrNumBitSize < 64 )
    {
        Ipp64u bndr;
        Ipp64u cntr;
        Ipp64s item;

        mask >>= ( 64 - ctrNumBitSize );
        save   = low & ( ~mask );
        cntr   = ( low & mask ) + ( Ipp64u )n;

        if( ctrNumBitSize < 31 )
        {
            bndr = ( Ipp64u )1 << ctrNumBitSize;

            if( cntr > bndr )
            {
                item = ( Ipp64s )( ( Ipp64s )n - ( ( Ipp64s )bndr -
                ( Ipp64s )( low & mask ) ) );

                while( item > 0 )
                {
                    cntr  = ( Ipp64u )item;
                    item -= ( Ipp64s )bndr;
                }
            }
        }

        low = save | ( cntr & mask );
    }
    else
    {
        flag = ( low >> 63 );

        if( ctrNumBitSize != 128 )
        {
            mask >>= ( 128 - ctrNumBitSize );
            save   = hgh & ( ~mask );
            hgh   &= mask;
        }

        low += ( Ipp64u )n;

        if( flag != ( low >> 63 ) ) hgh++;

        if( ctrNumBitSize != 128 )
        {
            hgh = save | ( hgh & mask );
        }
    }

  #if( IPP_ENDIAN == IPP_LITTLE_ENDIAN )
    for( k = 0; k < 8; k++ )
    {
        ( ( Ipp8u* )pCurrCtrVal )[15 - k] = ( ( Ipp8u* )&low )[k];
        ( ( Ipp8u* )pCurrCtrVal )[7 - k]  = ( ( Ipp8u* )&hgh )[k];
    }
  #else
    for( k = 0; k < 8; k++ )
    {
        ( ( Ipp8u* )pCurrCtrVal )[8 + k] = ( ( Ipp8u* )&low )[k];
        ( ( Ipp8u* )pCurrCtrVal )[k]     = ( ( Ipp8u* )&hgh )[k];
    }
  #endif
}
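
/*
// Illustrative usage sketch (hypothetical helper, not part of the library API;
// kept disabled like the other reference code in this file): compute the
// counter block that lies 1000 blocks past an initial 128-bit CTR value whose
// low 64 bits carry the counter.
*/
#if 0
static void ctrSeekExample(void)
{
   Ipp8u iv[16]  = {0};   /* initial counter block (big-endian)    */
   Ipp8u ctr[16];         /* counter block advanced by 1000 blocks */
   ompStdIncrement128(iv, ctr, 64, 1000);
}
#endif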

#if 0
/* vb */
__INLINE void ompStdIncrement192( void* pInitCtrVal, void* pCurrCtrVal,
                                int ctrNumBitSize, int n )
{
    int    k;
    Ipp64u low;
    Ipp64u mdl;
    Ipp64u hgh;
    Ipp64u flag;
    Ipp64u mask = CONST_64(0xFFFFFFFFFFFFFFFF);
    Ipp64u save;

  #if( IPP_ENDIAN == IPP_LITTLE_ENDIAN )
    for( k = 0; k < 8; k++ )
    {
        ( ( Ipp8u* )&low )[k] = ( ( Ipp8u* )pInitCtrVal )[23 - k];
        ( ( Ipp8u* )&mdl )[k] = ( ( Ipp8u* )pInitCtrVal )[15 - k];
        ( ( Ipp8u* )&hgh )[k] = ( ( Ipp8u* )pInitCtrVal )[7  - k];
    }
  #else
    for( k = 0; k < 8; k++ )
    {
        ( ( Ipp8u* )&low )[k] = ( ( Ipp8u* )pInitCtrVal )[16 + k];
        ( ( Ipp8u* )&mdl )[k] = ( ( Ipp8u* )pInitCtrVal )[8  + k];
        ( ( Ipp8u* )&hgh )[k] = ( ( Ipp8u* )pInitCtrVal )[k];
    }
  #endif

    if( ctrNumBitSize == 64 )
    {
        low += ( Ipp64u )n;
    }
    else if( ctrNumBitSize == 128 )
    {
        flag = ( low >> 63 );
        low += ( Ipp64u )n;
        if( flag != ( low >> 63 ) ) mdl++;
    }
    else if( ctrNumBitSize == 192 )
    {
        flag = ( low >> 63 );
        low += ( Ipp64u )n;

        if( flag != ( low >> 63 ) )
        {
            flag = ( mdl >> 63 );
            mdl++;
            if( flag != ( mdl >> 63 ) ) hgh++;
        }
    }
    else if( ctrNumBitSize < 64 )
    {
        Ipp64u bndr;
        Ipp64u cntr;
        Ipp64s item;

        mask >>= ( 64 - ctrNumBitSize );
        save   = low & ( ~mask );
        cntr   = ( low & mask ) + ( Ipp64u )n;

        if( ctrNumBitSize < 31 )
        {
            bndr = ( Ipp64u )1 << ctrNumBitSize;

            if( cntr > bndr )
            {
                item = ( Ipp64s )( ( Ipp64s )n - ( ( Ipp64s )bndr -
                    ( Ipp64s )( low & mask ) ) );

                while( item > 0 )
                {
                    cntr  = ( Ipp64u )item;
                    item -= ( Ipp64s )bndr;
                }
            }
        }

        low = save | ( cntr & mask );
    }
    else if( ctrNumBitSize < 128 )
    {
        flag   = ( low >> 63 );
        mask >>= ( 128 - ctrNumBitSize );
        save   = mdl & ( ~mask );
        mdl   &= mask;
        low   += ( Ipp64u )n;
        if( flag != ( low >> 63 ) ) mdl++;
        mdl    = save | ( mdl & mask );
    }
    else
    {
        flag   = ( low >> 63 );
        mask >>= ( 192 - ctrNumBitSize );
        save   = hgh & ( ~mask );
        hgh   &= mask;
        low   += ( Ipp64u )n;

        if( flag != ( low >> 63 ) )
        {
            flag = ( mdl >> 63 );
            mdl++;
            if( flag != ( mdl >> 63 ) ) hgh++;
        }

        hgh    = save | ( hgh & mask );
    }

  #if( IPP_ENDIAN == IPP_LITTLE_ENDIAN )
    for( k = 0; k < 8; k++ )
    {
        ( ( Ipp8u* )pCurrCtrVal )[23 - k] = ( ( Ipp8u* )&low )[k];
        ( ( Ipp8u* )pCurrCtrVal )[15 - k] = ( ( Ipp8u* )&mdl )[k];
        ( ( Ipp8u* )pCurrCtrVal )[7  - k] = ( ( Ipp8u* )&hgh )[k];
    }
  #else
    for( k = 0; k < 8; k++ )
    {
        ( ( Ipp8u* )pCurrCtrVal )[16 + k] = ( ( Ipp8u* )&low )[k];
        ( ( Ipp8u* )pCurrCtrVal )[8  + k] = ( ( Ipp8u* )&mdl )[k];
        ( ( Ipp8u* )pCurrCtrVal )[k]      = ( ( Ipp8u* )&hgh )[k];
    }
  #endif
}
#endif

#if 0
/* vb */
__INLINE void ompStdIncrement256( void* pInitCtrVal, void* pCurrCtrVal,
                                 int ctrNumBitSize, int n )
{
    int    k;
    Ipp64u low;
    Ipp64u mdl;
    Ipp64u mdm;
    Ipp64u hgh;
    Ipp64u flag;
    Ipp64u mask = CONST_64(0xFFFFFFFFFFFFFFFF);
    Ipp64u save;

  #if( IPP_ENDIAN == IPP_LITTLE_ENDIAN )
    for( k = 0; k < 8; k++ )
    {
        ( ( Ipp8u* )&low )[k] = ( ( Ipp8u* )pInitCtrVal )[31 - k];
        ( ( Ipp8u* )&mdl )[k] = ( ( Ipp8u* )pInitCtrVal )[23 - k];
        ( ( Ipp8u* )&mdm )[k] = ( ( Ipp8u* )pInitCtrVal )[15 - k];
        ( ( Ipp8u* )&hgh )[k] = ( ( Ipp8u* )pInitCtrVal )[7  - k];
    }
  #else
    for( k = 0; k < 8; k++ )
    {
        ( ( Ipp8u* )&low )[k] = ( ( Ipp8u* )pInitCtrVal )[24 + k];
        ( ( Ipp8u* )&mdl )[k] = ( ( Ipp8u* )pInitCtrVal )[16 + k];
        ( ( Ipp8u* )&mdm )[k] = ( ( Ipp8u* )pInitCtrVal )[8  + k];
        ( ( Ipp8u* )&hgh )[k] = ( ( Ipp8u* )pInitCtrVal )[k];
    }
  #endif

    if( ctrNumBitSize == 64 )
    {
        low += ( Ipp64u )n;
    }
    else if( ctrNumBitSize == 128 )
    {
        flag = ( low >> 63 );
        low += ( Ipp64u )n;
        if( flag != ( low >> 63 ) ) mdl++;
    }
    else if( ctrNumBitSize == 192 )
    {
        flag = ( low >> 63 );
        low += ( Ipp64u )n;

        if( flag != ( low >> 63 ) )
        {
            flag = ( mdl >> 63 );
            mdl++;
            if( flag != ( mdl >> 63 ) ) hgh++;
        }
    }
    else if( ctrNumBitSize == 256 )
    {
        flag = ( low >> 63 );
        low += ( Ipp64u )n;

        if( flag != ( low >> 63 ) )
        {
            flag = ( mdl >> 63 );
            mdl++;

            if( flag != ( mdl >> 63 ) )
            {
                flag = ( mdm >> 63 );
                mdm++;
                if( flag != ( mdm >> 63 ) ) hgh++;
            }
        }
    }
    else if( ctrNumBitSize < 64 )
    {
        Ipp64u bndr;
        Ipp64u cntr;
        Ipp64s item;

        mask >>= ( 64 - ctrNumBitSize );
        save   = low & ( ~mask );
        cntr   = ( low & mask ) + ( Ipp64u )n;

        if( ctrNumBitSize < 31 )
        {
            bndr = ( Ipp64u )1 << ctrNumBitSize;

            if( cntr > bndr )
            {
                item = ( Ipp64s )( ( Ipp64s )n - ( ( Ipp64s )bndr -
                    ( Ipp64s )( low & mask ) ) );

                while( item > 0 )
                {
                    cntr  = ( Ipp64u )item;
                    item -= ( Ipp64s )bndr;
                }
            }
        }

        low = save | ( cntr & mask );
    }
    else if( ctrNumBitSize < 128 )
    {
        flag   = ( low >> 63 );
        mask >>= ( 128 - ctrNumBitSize );
        save   = mdl & ( ~mask );
        mdl   &= mask;
        low   += ( Ipp64u )n;
        if( flag != ( low >> 63 ) ) mdl++;
        mdl    = save | ( mdl & mask );
    }
    else if( ctrNumBitSize < 192 )
    {
        flag   = ( low >> 63 );
        mask >>= ( 192 - ctrNumBitSize );
        save   = mdm & ( ~mask );
        mdm   &= mask;
        low   += ( Ipp64u )n;

        if( flag != ( low >> 63 ) )
        {
            flag = ( mdl >> 63 );
            mdl++;
            if( flag != ( mdl >> 63 ) ) mdm++;
        }

        mdm    = save | ( mdm & mask );
    }
    else
    {
        flag   = ( low >> 63 );
        mask >>= ( 256 - ctrNumBitSize );
        save   = hgh & ( ~mask );
        hgh   &= mask;
        low   += ( Ipp64u )n;

        if( flag != ( low >> 63 ) )
        {
            flag = ( mdl >> 63 );
            mdl++;

            if( flag != ( mdl >> 63 ) )
            {
                flag = ( mdm >> 63 );
                mdm++;
                if( flag != ( mdm >> 63 ) ) hgh++;
            }
        }

        hgh    = save | ( hgh & mask );
    }

  #if( IPP_ENDIAN == IPP_LITTLE_ENDIAN )
    for( k = 0; k < 8; k++ )
    {
        ( ( Ipp8u* )pCurrCtrVal )[31 - k] = ( ( Ipp8u* )&low )[k];
        ( ( Ipp8u* )pCurrCtrVal )[23 - k] = ( ( Ipp8u* )&mdl )[k];
        ( ( Ipp8u* )pCurrCtrVal )[15 - k] = ( ( Ipp8u* )&mdm )[k];
        ( ( Ipp8u* )pCurrCtrVal )[7  - k] = ( ( Ipp8u* )&hgh )[k];
    }
  #else
    for( k = 0; k < 8; k++ )
    {
        ( ( Ipp8u* )pCurrCtrVal )[24 + k] = ( ( Ipp8u* )&low )[k];
        ( ( Ipp8u* )pCurrCtrVal )[16 + k] = ( ( Ipp8u* )&mdl )[k];
        ( ( Ipp8u* )pCurrCtrVal )[8  + k] = ( ( Ipp8u* )&mdm )[k];
        ( ( Ipp8u* )pCurrCtrVal )[k]      = ( ( Ipp8u* )&hgh )[k];
    }
  #endif
}
#endif

#endif /* _CP_TOOL_H */