/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* ---- includes ----------------------------------------------------------- */

#include "b_TensorEm/Cluster2D.h"
#include "b_TensorEm/RBFMap2D.h"
#include "b_BasicEm/Math.h"
#include "b_BasicEm/Memory.h"
#include "b_BasicEm/Functions.h"

/* ------------------------------------------------------------------------- */

/* ========================================================================= */
/*                                                                           */
/* ---- \ghd{ auxiliary functions } ---------------------------------------- */
/*                                                                           */
/* ========================================================================= */

/* ------------------------------------------------------------------------- */

/** Computes the relative scale factor between two clusters from their
 *  mean square node distances to the respective cluster centers.
 */
void bts_Cluster2D_computeScale( uint32 enumA,		/* mean square radius, dst cluster */
								 int32 bbp_enumA,	/* bbp of enumA */
								 uint32 denomA,		/* mean square radius, src cluster */
								 int32 bbp_denomA,	/* bbp of denomA */
								 uint32* scaleA,	/* resulting scale factor */
								 int32* bbp_scaleA )/* bbp of scale factor */
{
	uint32 shiftL, quotientL;
	int32 posL, bbp_denomL;

	/* how far can we shift enumA to the left */
	shiftL = 31 - bbs_intLog2( enumA );

	/* how far do we have to shift denomA to the right */
	posL = bbs_intLog2( denomA ) + 1;
	bbp_denomL = bbp_denomA;

	if( posL - bbp_denomL > 12 )
	{
		/* if denomA has more than 12 bits before the point, discard the bits behind the point */
		denomA >>= bbp_denomL;
		bbp_denomL = 0;
	}
	else
	{
		/* otherwise reduce denomA to 12 bits */
		bbs_uint32ReduceToNBits( &denomA, &bbp_denomL, 12 );
	}

	/* make the result bbp even for the sqrt call */
	if( ( bbp_enumA + shiftL - bbp_denomL ) & 1 ) shiftL--;

	quotientL = ( enumA << shiftL ) / denomA;

	*scaleA = bbs_fastSqrt32( quotientL );
	*bbp_scaleA = ( bbp_enumA + shiftL - bbp_denomL ) >> 1;
}
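
/* Example: with enumA representing 2.0 ( enumA = 8, bbp_enumA = 2 ) and denomA
 * representing 0.5 ( denomA = 2, bbp_denomA = 2 ), the returned fixed-point pair
 * represents sqrt( 2.0 / 0.5 ) = 2.0, i.e. *scaleA / ( 1 << *bbp_scaleA ) == 2.
 */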

/* ------------------------------------------------------------------------- */

/* ========================================================================= */
/*                                                                           */
/* ---- \ghd{ constructor / destructor } ----------------------------------- */
/*                                                                           */
/* ========================================================================= */

/* ------------------------------------------------------------------------- */

void bts_Cluster2D_init( struct bbs_Context* cpA,
						 struct bts_Cluster2D* ptrA )
{
	ptrA->mspE = NULL;
	ptrA->vecArrE = NULL;
	ptrA->allocatedSizeE = 0;
	ptrA->sizeE = 0;
	ptrA->bbpE = 0;
}

/* ------------------------------------------------------------------------- */

void bts_Cluster2D_exit( struct bbs_Context* cpA,
						 struct bts_Cluster2D* ptrA )
{
	bbs_MemSeg_free( cpA, ptrA->mspE, ptrA->vecArrE );
	ptrA->vecArrE = NULL;
	ptrA->mspE = NULL;
	ptrA->allocatedSizeE = 0;
	ptrA->sizeE = 0;
	ptrA->bbpE = 0;
}

/* ------------------------------------------------------------------------- */

/* ========================================================================= */
/*                                                                           */
/* ---- \ghd{ operators } -------------------------------------------------- */
/*                                                                           */
/* ========================================================================= */

/* ------------------------------------------------------------------------- */

void bts_Cluster2D_copy( struct bbs_Context* cpA,
						 struct bts_Cluster2D* ptrA,
						 const struct bts_Cluster2D* srcPtrA )
{
#ifdef DEBUG2
	if( ptrA->allocatedSizeE < srcPtrA->sizeE )
	{
		bbs_ERROR0( "void bts_Cluster2D_copy( struct bts_Cluster2D* ptrA, const struct bts_Cluster2D* srcPtrA ): allocated size too low in destination cluster" );
		return;
	}
#endif

	bbs_memcpy32( ptrA->vecArrE, srcPtrA->vecArrE, bbs_SIZEOF32( struct bts_Int16Vec2D ) * srcPtrA->sizeE );

	ptrA->bbpE = srcPtrA->bbpE;
	ptrA->sizeE = srcPtrA->sizeE;
}

/* ------------------------------------------------------------------------- */

flag bts_Cluster2D_equal( struct bbs_Context* cpA,
						  const struct bts_Cluster2D* ptrA,
						  const struct bts_Cluster2D* srcPtrA )
{
	uint32 iL;
	const struct bts_Int16Vec2D* src1L = ptrA->vecArrE;
	const struct bts_Int16Vec2D* src2L = srcPtrA->vecArrE;

	if( ptrA->sizeE != srcPtrA->sizeE ) return FALSE;
	if( ptrA->bbpE != srcPtrA->bbpE ) return FALSE;

	for( iL = ptrA->sizeE; iL > 0; iL-- )
	{
		if( ( src1L->xE != src2L->xE ) || ( src1L->yE != src2L->yE ) ) return FALSE;
		src1L++;
		src2L++;
	}

	return TRUE;
}

/* ------------------------------------------------------------------------- */

/* ========================================================================= */
/*                                                                           */
/* ---- \ghd{ query functions } -------------------------------------------- */
/*                                                                           */
/* ========================================================================= */

/* ------------------------------------------------------------------------- */

struct bts_Flt16Vec2D bts_Cluster2D_center( struct bbs_Context* cpA,
										    const struct bts_Cluster2D* ptrA )
{
	struct bts_Int16Vec2D* vecPtrL = ptrA->vecArrE;
	uint32 iL;
	int32 xL = 0;
	int32 yL = 0;

	if( ptrA->sizeE == 0 ) return bts_Flt16Vec2D_create16( 0, 0, 0 );

	for( iL = ptrA->sizeE; iL > 0; iL-- )
	{
		xL += vecPtrL->xE;
		yL += vecPtrL->yE;
		vecPtrL++;
	}

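	/* divide the coordinate sums by the cluster size, rounding to the
	   nearest integer rather than truncating */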
	xL = ( ( ( xL << 1 ) / ( int32 )ptrA->sizeE ) + 1 ) >> 1;
	yL = ( ( ( yL << 1 ) / ( int32 )ptrA->sizeE ) + 1 ) >> 1;

	return bts_Flt16Vec2D_create16( ( int16 )xL, ( int16 )yL, ( int16 )ptrA->bbpE );
}

/* ------------------------------------------------------------------------- */

uint32 bts_Cluster2D_checkSum( struct bbs_Context* cpA,
							   const struct bts_Cluster2D* ptrA )
{
	struct bts_Int16Vec2D* vecPtrL = ptrA->vecArrE;
	uint32 iL;
	int32 sumL = ptrA->bbpE;

	for( iL = ptrA->sizeE; iL > 0; iL-- )
	{
		sumL += vecPtrL->xE;
		sumL += vecPtrL->yE;
		vecPtrL++;
	}

	return (uint32)sumL;
}

/* ------------------------------------------------------------------------- */

struct bts_Int16Rect bts_Cluster2D_boundingBox( struct bbs_Context* cpA,
											    const struct bts_Cluster2D* ptrA )
{
	struct bts_Int16Vec2D* vecPtrL = ptrA->vecArrE;
	uint32 iL;
	int32 xMinL = 65536;
	int32 yMinL = 65536;
	int32 xMaxL = -65536;
	int32 yMaxL = -65536;

	if( ptrA->sizeE == 0 ) return bts_Int16Rect_create( 0, 0, 0, 0 );

	for( iL = ptrA->sizeE; iL > 0; iL-- )
	{
		xMinL = bbs_min( xMinL, vecPtrL->xE );
		yMinL = bbs_min( yMinL, vecPtrL->yE );
		xMaxL = bbs_max( xMaxL, vecPtrL->xE );
		yMaxL = bbs_max( yMaxL, vecPtrL->yE );
		vecPtrL++;
	}

	return bts_Int16Rect_create( ( int16 )xMinL, ( int16 )yMinL, ( int16 )xMaxL, ( int16 )yMaxL );
}


/* ------------------------------------------------------------------------- */

int32 bts_Cluster2D_int32X( struct bbs_Context* cpA,
						    const struct bts_Cluster2D* ptrA,
							uint32 indexA, int32 bbpA )
{
#ifdef DEBUG2
	if( indexA >= ptrA->sizeE )
	{
		bbs_ERROR2( "int32 bts_Cluster2D_int32X( .... )\n"
			       "indexA = %i is out of range [0,%i]",
				   indexA,
				   ptrA->sizeE - 1 );
		return 0;
	}
#endif

	{
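		/* convert from the cluster's internal fixed-point format ( ptrA->bbpE )
		   to the requested format ( bbpA ); a negative shift is applied with
		   rounding to the nearest integer */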
		int32 shiftL = bbpA - ptrA->bbpE;
		int32 xL = ptrA->vecArrE[ indexA ].xE;
		if( shiftL >= 0 )
		{
			xL <<= shiftL;
		}
		else
		{
			xL = ( ( xL >> ( -shiftL - 1 ) ) + 1 ) >> 1;
		}

		return xL;
	}
}

/* ------------------------------------------------------------------------- */

int32 bts_Cluster2D_int32Y( struct bbs_Context* cpA,
						    const struct bts_Cluster2D* ptrA,
							uint32 indexA,
							int32 bbpA )
{
#ifdef DEBUG2
	if( indexA >= ptrA->sizeE )
	{
		bbs_ERROR2( "int32 bts_Cluster2D_int32Y( .... )\n"
			       "indexA = %i is out of range [0,%i]",
				   indexA,
				   ptrA->sizeE - 1 );
		return 0;
	}
#endif
	{
		int32 shiftL = bbpA - ptrA->bbpE;
		int32 yL = ptrA->vecArrE[ indexA ].yE;
		if( shiftL >= 0 )
		{
			yL <<= shiftL;
		}
		else
		{
			yL = ( ( yL >> ( -shiftL - 1 ) ) + 1 ) >> 1;
		}

		return yL;
	}
}

/* ------------------------------------------------------------------------- */

/* ========================================================================= */
/*                                                                           */
/* ---- \ghd{ modify functions } ------------------------------------------- */
/*                                                                           */
/* ========================================================================= */

/* ------------------------------------------------------------------------- */

void bts_Cluster2D_create( struct bbs_Context* cpA,
						   struct bts_Cluster2D* ptrA,
						   uint32 sizeA,
						   struct bbs_MemSeg* mspA )
{
	if( bbs_Context_error( cpA ) ) return;
	if( ptrA->mspE == NULL )
	{
		ptrA->sizeE = 0;
		ptrA->allocatedSizeE = 0;
		ptrA->vecArrE = NULL;
	}

	if( ptrA->sizeE == sizeA ) return;

	if( ptrA->vecArrE != 0 )
	{
		bbs_ERROR0( "void bts_Cluster2D_create( const struct bts_Cluster2D*, uint32 ):\n"
				   "object has already been created and cannot be resized." );
		return;
	}

	ptrA->vecArrE = bbs_MemSeg_alloc( cpA, mspA, sizeA * bbs_SIZEOF16( struct bts_Int16Vec2D ) );
	if( bbs_Context_error( cpA ) ) return;
	ptrA->sizeE = sizeA;
	ptrA->allocatedSizeE = sizeA;
	if( !mspA->sharedE ) ptrA->mspE = mspA;
}

/* ------------------------------------------------------------------------- */

void bts_Cluster2D_size( struct bbs_Context* cpA,
						 struct bts_Cluster2D* ptrA,
						 uint32 sizeA )
{
	if( ptrA->allocatedSizeE < sizeA )
	{
		bbs_ERROR2( "void bts_Cluster2D_size( struct bts_Cluster2D* ptrA, uint32 sizeA ):\n"
				   "Allocated size (%i) of cluster is smaller than requested size (%i).",
				   ptrA->allocatedSizeE,
				   sizeA );
		return;
	}
	ptrA->sizeE = sizeA;
}

/* ------------------------------------------------------------------------- */

void bts_Cluster2D_transform( struct bbs_Context* cpA,
							  struct bts_Cluster2D* ptrA,
							  struct bts_Flt16Alt2D altA )
{
	uint32 iL;
	for( iL = 0; iL < ptrA->sizeE; iL++ )
	{
		struct bts_Flt16Vec2D vL = bts_Flt16Vec2D_createVec16( ptrA->vecArrE[ iL ], ptrA->bbpE );
		ptrA->vecArrE[ iL ] = bts_Flt16Vec2D_int16Vec2D( bts_Flt16Alt2D_mapFlt( &altA, &vL ), ptrA->bbpE );
	}
}

/* ------------------------------------------------------------------------- */

void bts_Cluster2D_transformBbp( struct bbs_Context* cpA,
							     struct bts_Cluster2D* ptrA,
							     struct bts_Flt16Alt2D altA,
								 uint32 dstBbpA )
{
	uint32 iL;
	for( iL = 0; iL < ptrA->sizeE; iL++ )
	{
		struct bts_Flt16Vec2D vL = bts_Flt16Vec2D_createVec16( ptrA->vecArrE[ iL ], ptrA->bbpE );
		ptrA->vecArrE[ iL ] = bts_Flt16Vec2D_int16Vec2D( bts_Flt16Alt2D_mapFlt( &altA, &vL ), dstBbpA );
	}
	ptrA->bbpE = dstBbpA;
}

/* ------------------------------------------------------------------------- */

void bts_Cluster2D_rbfTransform( struct bbs_Context* cpA,
								 struct bts_Cluster2D* ptrA,
								 const struct bts_RBFMap2D* rbfMapPtrA )
{
	bts_RBFMap2D_mapCluster( cpA, rbfMapPtrA, ptrA, ptrA, ptrA->bbpE );
}

/* ------------------------------------------------------------------------- */

void bts_Cluster2D_copyTransform( struct bbs_Context* cpA,
								  struct bts_Cluster2D* ptrA,
								  const struct bts_Cluster2D* srcPtrA,
								  struct bts_Flt16Alt2D altA,
								  uint32 dstBbpA )
{
	uint32 iL;

	/* prepare destination cluster */
	if( ptrA->allocatedSizeE < srcPtrA->sizeE )
	{
		bbs_ERROR0( "void bts_Cluster2D_copyTransform( struct bts_Cluster2D* ptrA, const struct bts_Cluster2D* srcPtrA, struct bts_Flt16Alt2D altA, uint32 dstBbpA ): allocated size too low in destination cluster" );
		return;
	}

	ptrA->sizeE = srcPtrA->sizeE;
	ptrA->bbpE = dstBbpA;

	/* transform */
	for( iL = 0; iL < ptrA->sizeE; iL++ )
	{
		struct bts_Flt16Vec2D vL = bts_Flt16Vec2D_createVec16( srcPtrA->vecArrE[ iL ], srcPtrA->bbpE );
		ptrA->vecArrE[ iL ] = bts_Flt16Vec2D_int16Vec2D( bts_Flt16Alt2D_mapFlt( &altA, &vL ), ptrA->bbpE );
	}
}

/* ------------------------------------------------------------------------- */

/* ========================================================================= */
/*                                                                           */
/* ---- \ghd{ I/O } -------------------------------------------------------- */
/*                                                                           */
/* ========================================================================= */

/* ------------------------------------------------------------------------- */

uint32 bts_Cluster2D_memSize( struct bbs_Context* cpA,
							  const struct bts_Cluster2D *ptrA )
{
	return  bbs_SIZEOF16( uint32 ) /* mem size */
		  + bbs_SIZEOF16( uint32 ) /* version */
		  + bbs_SIZEOF16( ptrA->sizeE )
		  + bbs_SIZEOF16( ptrA->bbpE )
		  + bbs_SIZEOF16( struct bts_Int16Vec2D ) * ptrA->sizeE;
}
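
/* The serialized layout implied by bts_Cluster2D_memSize and bts_Cluster2D_memWrite
 * below: total size in 16-bit words ( uint32 ), version ( uint32 ), sizeE and bbpE
 * ( 32 bit each ), followed by sizeE pairs of int16 x/y coordinates.
 */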

/* ------------------------------------------------------------------------- */

uint32 bts_Cluster2D_memWrite( struct bbs_Context* cpA,
							   const struct bts_Cluster2D* ptrA,
							   uint16* memPtrA )
{
	uint32 memSizeL = bts_Cluster2D_memSize( cpA, ptrA );
	memPtrA += bbs_memWrite32( &memSizeL, memPtrA );
	memPtrA += bbs_memWriteUInt32( bts_CLUSTER2D_VERSION, memPtrA );
	memPtrA += bbs_memWrite32( &ptrA->sizeE, memPtrA );
	memPtrA += bbs_memWrite32( &ptrA->bbpE, memPtrA );
	memPtrA += bbs_memWrite16Arr( cpA, ptrA->vecArrE, ptrA->sizeE * 2, memPtrA );
	return memSizeL;
}

/* ------------------------------------------------------------------------- */

uint32 bts_Cluster2D_memRead( struct bbs_Context* cpA,
							  struct bts_Cluster2D* ptrA,
							  const uint16* memPtrA,
							  struct bbs_MemSeg* mspA )
{
	uint32 memSizeL;
	uint32 sizeL;
	uint32 versionL;
	if( bbs_Context_error( cpA ) ) return 0;
	memPtrA += bbs_memRead32( &memSizeL, memPtrA );
	memPtrA += bbs_memReadVersion32( cpA, &versionL, bts_CLUSTER2D_VERSION, memPtrA );
	memPtrA += bbs_memRead32( &sizeL, memPtrA );
	memPtrA += bbs_memRead32( &ptrA->bbpE, memPtrA );

	if( ptrA->allocatedSizeE < sizeL )
	{
		bts_Cluster2D_create( cpA, ptrA, sizeL, mspA );
	}
	else
	{
		bts_Cluster2D_size( cpA, ptrA, sizeL );
	}

	memPtrA += bbs_memRead16Arr( cpA, ptrA->vecArrE, ptrA->sizeE * 2, memPtrA );

	if( memSizeL != bts_Cluster2D_memSize( cpA, ptrA ) )
	{
		bbs_ERR0( bbs_ERR_CORRUPT_DATA, "uint32 bts_Cluster2D_memRead( const struct bts_Cluster2D* ptrA, const void* memPtrA ):\n"
                   "size mismatch" );
		return 0;
	}
	return memSizeL;
}

/* ------------------------------------------------------------------------- */

/* ========================================================================= */
/*                                                                           */
/* ---- \ghd{ exec functions } --------------------------------------------- */
/*                                                                           */
/* ========================================================================= */

/* ------------------------------------------------------------------------- */

struct bts_Flt16Alt2D bts_Cluster2D_alt( struct bbs_Context* cpA,
										 const struct bts_Cluster2D* srcPtrA,
										 const struct bts_Cluster2D* dstPtrA,
										 enum bts_AltType altTypeA )
{
	struct bts_Flt16Alt2D altL = bts_Flt16Alt2D_createIdentity();
	enum bts_AltType altTypeL = altTypeA;

	uint32 sizeL = srcPtrA->sizeE;
	int32 srcBbpL = srcPtrA->bbpE;
	int32 dstBbpL = dstPtrA->bbpE;

	struct bts_Flt16Vec2D cpL, cqL, cpMappedL, cpAdjustedL;

	if( dstPtrA->sizeE != srcPtrA->sizeE )
	{
		bbs_ERROR2( "struct bts_Flt16Alt2D bts_Cluster2D_alt( ... ):\n"
                   "the 2 input clusters differ in size: %d vs %d", srcPtrA->sizeE, dstPtrA->sizeE );
	}

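	/* degrade the requested transform type when there are too few node pairs
	   to determine it: 2 pairs or fewer turn linear into rigid; 1 pair or fewer
	   turns rigid and translation/scale into pure translation; an empty cluster
	   yields the identity */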
	if( sizeL <= 2 )
	{
		if( altTypeL == bts_ALT_LINEAR )
		{
			altTypeL = bts_ALT_RIGID;
		}
	}

	if( sizeL <= 1 )
	{
		if( altTypeL == bts_ALT_RIGID )
		{
			altTypeL = bts_ALT_TRANS;
		}
		else if( altTypeL == bts_ALT_TRANS_SCALE )
		{
			altTypeL = bts_ALT_TRANS;
		}
	}

	if( sizeL == 0 || altTypeL == bts_ALT_IDENTITY )
	{
		/* return identity */
		return altL;
	}

	cpL = bts_Cluster2D_center( cpA, srcPtrA );
	cqL = bts_Cluster2D_center( cpA, dstPtrA );

	if( altTypeL == bts_ALT_TRANS )
	{
		/* return translation only */
		altL.vecE = bts_Flt16Vec2D_sub( cqL, cpL );
		return altL;
	}

	switch( altTypeL )
	{
		case bts_ALT_TRANS_SCALE:
		{
			uint32 spL = 0;
			uint32 sqL = 0;

			struct bts_Int16Vec2D* srcPtrL = srcPtrA->vecArrE;
			struct bts_Int16Vec2D* dstPtrL = dstPtrA->vecArrE;

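			/* spL and sqL accumulate the mean square distances of the src and dst
			   nodes to their respective centers; the relative scale between the
			   clusters is then sqrt( sqL / spL ) */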
			int32 iL = sizeL;
			while( iL-- )
			{
				int32 pxL = srcPtrL->xE - cpL.xE;
				int32 pyL = srcPtrL->yE - cpL.yE;
				int32 qxL = dstPtrL->xE - cqL.xE;
				int32 qyL = dstPtrL->yE - cqL.yE;
				srcPtrL++;
				dstPtrL++;

				/* overflow estimate: no problem with  100 nodes,  bbp = 6,  x = y = 500 */
				spL += ( pxL * pxL ) >> srcBbpL;
				spL += ( pyL * pyL ) >> srcBbpL;
				sqL += ( qxL * qxL ) >> dstBbpL;
				sqL += ( qyL * qyL ) >> dstBbpL;
			}

			spL /= sizeL;
			sqL /= sizeL;

			if( spL == 0 )
			{
				bbs_ERROR0( "struct bts_Flt16Alt2D bts_Cluster2D_alt( ... ):\n"
						   "All nodes of the src cluster are sitting in the center -> "
						   "unable to compute scale matrix between clusters" );
			}
			else
			{
				uint32 scaleL;
				int32 factor32L, bbp_scaleL;
				int16 factor16L;

				bts_Cluster2D_computeScale( sqL, dstBbpL, spL, srcBbpL, &scaleL, &bbp_scaleL );

				/* create scale matrix */
				factor32L = ( int32 )scaleL;
				altL.matE = bts_Flt16Mat2D_createScale( factor32L, bbp_scaleL );

				/* create translation vector */
				factor16L = scaleL;
				cpMappedL = bts_Flt16Vec2D_mul( cpL, factor16L, bbp_scaleL );
				altL.vecE = bts_Flt16Vec2D_sub( cqL, cpMappedL );

				return altL;
			}
		}
		break;

		case bts_ALT_RIGID:
		{
			/* smaller of the 2 bbp's */
			int32 minBbpL = bbs_min( srcBbpL, dstBbpL );

			uint32 spL = 0;
			uint32 sqL = 0;
			int32 pxqxL = 0;
			int32 pxqyL = 0;
			int32 pyqxL = 0;
			int32 pyqyL = 0;

			struct bts_Int16Vec2D* srcPtrL = srcPtrA->vecArrE;
			struct bts_Int16Vec2D* dstPtrL = dstPtrA->vecArrE;

			int32 iL = sizeL;
			while( iL-- )
			{
				int32 pxL = srcPtrL->xE - cpL.xE;
				int32 pyL = srcPtrL->yE - cpL.yE;
				int32 qxL = dstPtrL->xE - cqL.xE;
				int32 qyL = dstPtrL->yE - cqL.yE;
				srcPtrL++;
				dstPtrL++;

				/* overflow estimate: no problem with  100 nodes,  bbp = 6,  x = y = 500 */
				spL += ( pxL * pxL ) >> srcBbpL;
				spL += ( pyL * pyL ) >> srcBbpL;
				sqL += ( qxL * qxL ) >> dstBbpL;
				sqL += ( qyL * qyL ) >> dstBbpL;

				pxqxL += ( pxL * qxL ) >> minBbpL;
				pxqyL += ( pxL * qyL ) >> minBbpL;
				pyqxL += ( pyL * qxL ) >> minBbpL;
				pyqyL += ( pyL * qyL ) >> minBbpL;
			}

			spL /= sizeL;
			sqL /= sizeL;
			pxqxL /= ( int32 )sizeL;
			pxqyL /= ( int32 )sizeL;
			pyqxL /= ( int32 )sizeL;
			pyqyL /= ( int32 )sizeL;

			if( spL == 0 )
			{
				bbs_ERROR0( "struct bts_Flt16Alt2D bts_Cluster2D_alt( ... ):\n"
						   "All nodes of the src cluster are sitting in the center -> "
						   "unable to compute scale matrix between clusters" );
			}
			else
			{
				uint32 scaleL, shiftL, quotientL, enumL, denomL, bitsTaken0L, bitsTaken1L;
				int32 bbp_scaleL, cL, rL, c1L, r1L;
				int32 ppL, pmL, mpL, mmL, maxL;
				int32 quotientBbpL, bbp_crL, posL;


				/* find scale factor: */

				bts_Cluster2D_computeScale( sqL, dstBbpL, spL, srcBbpL, &scaleL, &bbp_scaleL );


				/* find rotation matrix: */

				/* sign not needed any more */
				enumL = bbs_abs( pxqyL - pyqxL );
				denomL = bbs_abs( pxqxL + pyqyL );

				if( denomL == 0 )
				{
					cL = 0;
					rL = 1;
					quotientBbpL = 0;
				}
				else
				{
					/* original formula:

					float aL = enumL / denomL;
					cL = sqrt( 1.0 / ( 1.0 + ebs_sqr( aL ) ) );
					rL = sqrt( 1 - ebs_sqr( cL ) );

					*/
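					/* fixed-point version of the formula above: aL is effectively
					   the tangent of the rotation angle estimated from the cross
					   and dot products, so cL and rL correspond to its cosine and
					   sine; the sign ambiguity is resolved further below */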

					/* how far can we shift enumL to the left */
					shiftL = 31 - bbs_intLog2( enumL );

					/* result has bbp = shiftL */
					quotientL = ( enumL << shiftL ) / denomL;
					quotientBbpL = shiftL;

					posL = bbs_intLog2( quotientL );

					/* if enumL much larger than denomL, then we cannot square the quotient */
					if( posL > ( quotientBbpL + 14 ) )
					{
						cL = 0;
						rL = 1;
						quotientBbpL = 0;
					}
					else if( quotientBbpL > ( posL + 14 ) )
					{
						cL = 1;
						rL = 0;
						quotientBbpL = 0;
					}
					else
					{
						bbs_uint32ReduceToNBits( &quotientL, &quotientBbpL, 15 );

						/* to avoid an overflow in the next operation */
						if( quotientBbpL > 15 )
						{
							quotientL >>= ( quotientBbpL - 15 );
							quotientBbpL -= ( quotientBbpL - 15 );
						}

						/* result has again bbp = quotientBbpL */
						denomL = bbs_fastSqrt32( quotientL * quotientL + ( ( int32 )1 << ( quotientBbpL << 1 ) ) );

						quotientL = ( ( uint32 )1 << 31 ) / denomL;
						quotientBbpL = 31 - quotientBbpL;

						bbs_uint32ReduceToNBits( &quotientL, &quotientBbpL, 15 );

						/* to avoid an overflow in the next operation */
						if( quotientBbpL > 15 )
						{
							quotientL >>= ( quotientBbpL - 15 );
							quotientBbpL -= ( quotientBbpL - 15 );
						}

						cL = quotientL;
						rL = bbs_fastSqrt32( ( ( int32 )1 << ( quotientBbpL << 1 ) ) - quotientL * quotientL );
					}
				}

				/* save cL and rL with this accuracy for later */
				c1L = cL;
				r1L = rL;
				bbp_crL = quotientBbpL;

				/* prepare the next computations */
				bitsTaken0L = bts_maxAbsIntLog2Of4( pxqxL, pxqyL, pyqxL, pyqyL ) + 1;
				bitsTaken1L = bts_maxAbsIntLog2Of2( cL, rL ) + 1;

				if( ( bitsTaken0L + bitsTaken1L ) > 29 )
				{
					int32 shiftL = bitsTaken0L + bitsTaken1L - 29;
					cL >>= shiftL;
					rL >>= shiftL;
					quotientBbpL -= shiftL;
				}

				/* best combination: */
				ppL =   cL * pxqxL - rL * pyqxL + cL * pyqyL + rL * pxqyL;
				pmL =   cL * pxqxL + rL * pyqxL + cL * pyqyL - rL * pxqyL;
				mpL = - cL * pxqxL - rL * pyqxL - cL * pyqyL + rL * pxqyL;
				mmL = - cL * pxqxL + rL * pyqxL - cL * pyqyL - rL * pxqyL;

				maxL = bbs_max( bbs_max( ppL, pmL ), bbs_max( mpL, mmL ) );

				/* restore cL and rL, bbp = bbp_crL */
				cL = c1L;
				rL = r1L;

				/* rotation matrix */
				if( ppL == maxL )
				{
					altL.matE = bts_Flt16Mat2D_create32( cL, -rL, rL, cL, bbp_crL );
				}
				else if( pmL == maxL )
				{
					altL.matE = bts_Flt16Mat2D_create32( cL, rL, -rL, cL, bbp_crL );
				}
				else if( mpL == maxL )
				{
					altL.matE = bts_Flt16Mat2D_create32( -cL, -rL, rL, -cL, bbp_crL );
				}
				else
				{
					altL.matE = bts_Flt16Mat2D_create32( -cL, rL, -rL, -cL, bbp_crL );
				}


				/* find translation: */

				/* original formula:

				ets_Float2DVec transL = cqL - ( scaleL * ( rotL * cpL ) );
				altL.mat( rotL * scaleL );
				altL.vec( transL );

				*/

				bts_Flt16Mat2D_scale( &altL.matE, scaleL, bbp_scaleL );
				cpMappedL = bts_Flt16Mat2D_mapFlt( &altL.matE, &cpL );
				altL.vecE = bts_Flt16Vec2D_sub( cqL, cpMappedL );
			}

			return altL;
		}

		case bts_ALT_LINEAR:
		{
			/* smaller of the 2 bbp's */
			int32 minBbpL = bbs_min( srcBbpL, dstBbpL );

			int32 iL = 0;
			int32 pxpxL = 0;
			int32 pxpyL = 0;
			int32 pypyL = 0;
			int32 pxqxL = 0;
			int32 pxqyL = 0;
			int32 pyqxL = 0;
			int32 pyqyL = 0;

			struct bts_Int16Vec2D* srcPtrL = srcPtrA->vecArrE;
			struct bts_Int16Vec2D* dstPtrL = dstPtrA->vecArrE;

			/* get cp adjusted to dstBbpL */
			int32 shiftL = dstBbpL - srcBbpL;
			if( shiftL > 0 )
			{
				cpAdjustedL.xE = cpL.xE << shiftL;
				cpAdjustedL.yE = cpL.yE << shiftL;
				cpAdjustedL.bbpE = dstBbpL;
			}
			else
			{
				cpAdjustedL.xE = ( ( cpL.xE >> ( -shiftL - 1 ) ) + 1 ) >> 1;
				cpAdjustedL.yE = ( ( cpL.yE >> ( -shiftL - 1 ) ) + 1 ) >> 1;
				cpAdjustedL.bbpE = dstBbpL;
			}

			iL = sizeL;
			while( iL-- )
			{
				int32 pxL = srcPtrL->xE - cpL.xE;
				int32 pyL = srcPtrL->yE - cpL.yE;
				int32 qxL = dstPtrL->xE - cpAdjustedL.xE;	/* cp, not cq! */
				int32 qyL = dstPtrL->yE - cpAdjustedL.yE;
				srcPtrL++;
				dstPtrL++;

				/* overflow estimate: no problem with  100 nodes,  bbp = 6,  x = y = 500 */
				pxpxL += ( pxL * pxL ) >> srcBbpL;
				pxpyL += ( pxL * pyL ) >> srcBbpL;
				pypyL += ( pyL * pyL ) >> srcBbpL;

				pxqxL += ( pxL * qxL ) >> minBbpL;
				pxqyL += ( pxL * qyL ) >> minBbpL;
				pyqxL += ( pyL * qxL ) >> minBbpL;
				pyqyL += ( pyL * qyL ) >> minBbpL;
			}

			pxpxL /= ( int32 )sizeL;
			pxpyL /= ( int32 )sizeL;
			pypyL /= ( int32 )sizeL;
			pxqxL /= ( int32 )sizeL;
			pxqyL /= ( int32 )sizeL;
			pyqxL /= ( int32 )sizeL;
			pyqyL /= ( int32 )sizeL;

			{
				/* original code:

				float detPL = ( pxpxL * pypyL ) - ( pxpyL * pxpyL );

				if( ebs_neglectable( detPL ) )
				{
					matL.setIdentity();
				}
				else
				{
					matL.xx( ( pxqxL * pypyL - pyqxL * pxpyL ) / detPL );
					matL.xy( ( pyqxL * pxpxL - pxqxL * pxpyL ) / detPL );
					matL.yx( ( pxqyL * pypyL - pyqyL * pxpyL ) / detPL );
					matL.yy( ( pyqyL * pxpxL - pxqyL * pxpyL ) / detPL );
				}

				*/
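				/* fixed-point version of the code above: the 2x2 matrix is
				   effectively the least-squares linear map from the centered src
				   nodes onto the centered dst nodes, obtained by inverting the
				   accumulated covariance terms pxpxL, pxpyL, pypyL */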

				/* compute det first */
				uint32 bitsTaken0L = bts_maxAbsIntLog2Of4( pxpxL, pypyL, pxpyL, pxpyL ) + 1;
				int32 shL = 0;
				int32 detL = 0;
				int32 detBbpL = 0;

				if( bitsTaken0L > 15 )
				{
					shL = bitsTaken0L - 15;
				}

				detL = ( pxpxL >> shL ) * ( pypyL >> shL ) - ( pxpyL >> shL ) * ( pxpyL >> shL );

				/* this can be negative */
				detBbpL = ( srcBbpL - shL ) << 1;

				/* reduce to 15 bit */
				shL = ( int32 )bts_absIntLog2( detL );
				if( shL > 15 )
				{
					detL >>= ( shL - 15 );
					detBbpL -= ( shL - 15 );
				}

				if( detL != 0 )
				{
					int32 sh0L, sh1L, xxL, xyL, yxL, yyL, bbp_enumL;
					uint32 bitsTaken1L, highestBitL;

					sh0L = 0;
					if( bitsTaken0L > 15 )
					{
						sh0L = bitsTaken0L - 15;
					}

					bitsTaken1L = bts_maxAbsIntLog2Of4( pxqxL, pxqyL, pyqxL, pyqyL ) + 1;
					sh1L = 0;
					if( bitsTaken1L > 15 )
					{
						sh1L = bitsTaken1L - 15;
					}

					xxL = ( pxqxL >> sh1L ) * ( pypyL >> sh0L ) - ( pyqxL >> sh1L ) * ( pxpyL >> sh0L );
					xyL = ( pyqxL >> sh1L ) * ( pxpxL >> sh0L ) - ( pxqxL >> sh1L ) * ( pxpyL >> sh0L );
					yxL = ( pxqyL >> sh1L ) * ( pypyL >> sh0L ) - ( pyqyL >> sh1L ) * ( pxpyL >> sh0L );
					yyL = ( pyqyL >> sh1L ) * ( pxpxL >> sh0L ) - ( pxqyL >> sh1L ) * ( pxpyL >> sh0L );

					/* again, can be negative */
					bbp_enumL = ( srcBbpL - sh0L ) + ( bbs_max( srcBbpL, dstBbpL ) - sh1L );

					highestBitL = bts_maxAbsIntLog2Of4( xxL, xyL, yxL, yyL ) + 1;

					/* shift left */
					xxL <<= ( 31 - highestBitL );
					xyL <<= ( 31 - highestBitL );
					yxL <<= ( 31 - highestBitL );
					yyL <<= ( 31 - highestBitL );

					bbp_enumL += ( 31 - highestBitL );

					xxL /= detL;
					xyL /= detL;
					yxL /= detL;
					yyL /= detL;

					bbp_enumL -= detBbpL;

					altL.matE = bts_Flt16Mat2D_create32( xxL, xyL, yxL, yyL, bbp_enumL );
				}

				cpMappedL = bts_Flt16Mat2D_mapFlt( &altL.matE, &cpL );
				altL.vecE = bts_Flt16Vec2D_sub( cqL, cpMappedL );
			}

			return altL;
		}

		default:
		{
			bbs_ERROR1( "struct bts_Flt16Alt2D bts_Cluster2D_alt( ... ):\n"
				       "altType %d is not handled", altTypeL );
		}
	}

	return altL;
}
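
/* Typical use (illustrative sketch; srcL and dstL are assumed to be clusters of
 * equal size that were created and filled elsewhere, cpA a valid context):
 *
 *	struct bts_Flt16Alt2D altL = bts_Cluster2D_alt( cpA, &srcL, &dstL, bts_ALT_RIGID );
 *	bts_Cluster2D_transformBbp( cpA, &srcL, altL, dstL.bbpE );
 *
 * maps the nodes of srcL onto those of dstL with the rotation, scale and
 * translation estimated by the bts_ALT_RIGID branch above.
 */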

/* ------------------------------------------------------------------------- */

/* ========================================================================= */
