/*
Bullet Continuous Collision Detection and Physics Library
Copyright (c) 2003-2006 Erwin Coumans  http://continuousphysics.com/Bullet/

This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/

#ifndef BT_QUANTIZED_BVH_H
#define BT_QUANTIZED_BVH_H

class btSerializer;

//#define DEBUG_CHECK_DEQUANTIZATION 1
#ifdef DEBUG_CHECK_DEQUANTIZATION
#ifdef __SPU__
#define printf spu_printf
#endif //__SPU__

#include <stdio.h>
#include <stdlib.h>
#endif //DEBUG_CHECK_DEQUANTIZATION

#include "LinearMath/btVector3.h"
#include "LinearMath/btAlignedAllocator.h"

#ifdef BT_USE_DOUBLE_PRECISION
#define btQuantizedBvhData btQuantizedBvhDoubleData
#define btOptimizedBvhNodeData btOptimizedBvhNodeDoubleData
#define btQuantizedBvhDataName "btQuantizedBvhDoubleData"
#else
#define btQuantizedBvhData btQuantizedBvhFloatData
#define btOptimizedBvhNodeData btOptimizedBvhNodeFloatData
#define btQuantizedBvhDataName "btQuantizedBvhFloatData"
#endif



//http://msdn.microsoft.com/library/default.asp?url=/library/en-us/vclang/html/vclrf__m128.asp


//Note: currently we have 16 bytes per quantized node
#define MAX_SUBTREE_SIZE_IN_BYTES  2048

// 10 bits gives the potential for 1024 parts, each with at most 2^21 - 1 (2097151)
// triangles (the sign bit is reserved to distinguish leaf nodes from internal nodes)
#define MAX_NUM_PARTS_IN_BITS 10

///btQuantizedBvhNode is a compressed AABB node, 16 bytes.
///A node can be either a leaf node or an internal node. Leaf nodes store a 32-bit triangle index (non-negative range).
ATTRIBUTE_ALIGNED16	(struct) btQuantizedBvhNode
{
	BT_DECLARE_ALIGNED_ALLOCATOR();

	//12 bytes
	unsigned short int	m_quantizedAabbMin[3];
	unsigned short int	m_quantizedAabbMax[3];
	//4 bytes
	int	m_escapeIndexOrTriangleIndex;

	bool isLeafNode() const
	{
		//skipindex is negative (internal node), triangleindex >=0 (leafnode)
		return (m_escapeIndexOrTriangleIndex >= 0);
	}
	int getEscapeIndex() const
	{
		btAssert(!isLeafNode());
		return -m_escapeIndexOrTriangleIndex;
	}
	int	getTriangleIndex() const
	{
		btAssert(isLeafNode());
		// Mask off the sign bit and the part id stored in the high bits;
		// the triangle index lives in the lower (31 - MAX_NUM_PARTS_IN_BITS) bits.
		unsigned int partMask = (~0u) << (31 - MAX_NUM_PARTS_IN_BITS);
		return (m_escapeIndexOrTriangleIndex & ~partMask);
	}
	int	getPartId() const
	{
		btAssert(isLeafNode());
		// Get only the highest bits where the part index is stored
		return (m_escapeIndexOrTriangleIndex>>(31-MAX_NUM_PARTS_IN_BITS));
	}
}
;
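
// A quick sketch (illustrative, not part of the original header) of how the
// leaf encoding above packs two values into one non-negative int, given the
// default MAX_NUM_PARTS_IN_BITS of 10: the part id lives in the upper 10 bits
// below the sign bit, the triangle index in the lower 21 bits.
//
//   int packed = (partId << (31 - MAX_NUM_PARTS_IN_BITS)) | triangleIndex;
//   // e.g. partId = 2, triangleIndex = 100:
//   //   packed = (2 << 21) | 100 = 4194404
//   //   node.getPartId()        -> 2
//   //   node.getTriangleIndex() -> 100
//
// Internal nodes instead store the negated escape index, which is why
// isLeafNode() can simply test the sign.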

/// btOptimizedBvhNode contains both internal and leaf node information.
/// The payload is 44 bytes, padded to 64 bytes per node; consider using the compressed 16-byte btQuantizedBvhNode version instead.
ATTRIBUTE_ALIGNED16 (struct) btOptimizedBvhNode
{
	BT_DECLARE_ALIGNED_ALLOCATOR();

	//32 bytes
	btVector3	m_aabbMinOrg;
	btVector3	m_aabbMaxOrg;

	//4
	int	m_escapeIndex;

	//8
	//for child nodes
	int	m_subPart;
	int	m_triangleIndex;

	//pad the size to 64 bytes
	char	m_padding[20];
};
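
// An illustrative note (not in the original header) on how the escape index
// drives the stackless traversal: nodes are stored in depth-first order, so
// when a query AABB misses an internal node, the walker jumps ahead by
// m_escapeIndex entries to skip that node's whole subtree.
//
//   // sketch of the core loop; isLeaf and testAabbAgainstQuery are
//   // hypothetical helpers standing in for the real traversal code:
//   int curIndex = 0;
//   while (curIndex < numNodes)
//   {
//   	const btOptimizedBvhNode& node = nodes[curIndex];
//   	bool overlap = testAabbAgainstQuery(node);
//   	if (overlap && isLeaf(node))
//   		callback->processNode(node.m_subPart, node.m_triangleIndex);
//   	// descend into overlapping subtrees, otherwise skip them entirely
//   	curIndex += (overlap || isLeaf(node)) ? 1 : node.m_escapeIndex;
//   }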


///btBvhSubtreeInfo provides info to gather a subtree of limited size
ATTRIBUTE_ALIGNED16(class) btBvhSubtreeInfo
{
public:
	BT_DECLARE_ALIGNED_ALLOCATOR();

	//12 bytes
	unsigned short int	m_quantizedAabbMin[3];
	unsigned short int	m_quantizedAabbMax[3];
	//4 bytes, points to the root of the subtree
	int			m_rootNodeIndex;
	//4 bytes
	int			m_subtreeSize;
	int			m_padding[3];

	btBvhSubtreeInfo()
	{
		//memset(&m_padding[0], 0, sizeof(m_padding));
	}


	void	setAabbFromQuantizeNode(const btQuantizedBvhNode& quantizedNode)
	{
		m_quantizedAabbMin[0] = quantizedNode.m_quantizedAabbMin[0];
		m_quantizedAabbMin[1] = quantizedNode.m_quantizedAabbMin[1];
		m_quantizedAabbMin[2] = quantizedNode.m_quantizedAabbMin[2];
		m_quantizedAabbMax[0] = quantizedNode.m_quantizedAabbMax[0];
		m_quantizedAabbMax[1] = quantizedNode.m_quantizedAabbMax[1];
		m_quantizedAabbMax[2] = quantizedNode.m_quantizedAabbMax[2];
	}
}
;


class btNodeOverlapCallback
{
public:
	virtual ~btNodeOverlapCallback() {}

	virtual void processNode(int subPart, int triangleIndex) = 0;
};
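
// A minimal sketch (not part of the original header) of implementing the
// callback: processNode receives the sub-part and triangle index of every
// leaf whose AABB overlaps the query, and the user looks the triangle up in
// their own mesh data.
//
//   struct CollectTriangles : public btNodeOverlapCallback
//   {
//   	btAlignedObjectArray<int> m_hits;
//   	virtual void processNode(int subPart, int triangleIndex)
//   	{
//   		(void)subPart; // single-part mesh assumed in this sketch
//   		m_hits.push_back(triangleIndex);
//   	}
//   };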

#include "LinearMath/btAlignedAllocator.h"
#include "LinearMath/btAlignedObjectArray.h"



///for code readability:
typedef btAlignedObjectArray<btOptimizedBvhNode>	NodeArray;
typedef btAlignedObjectArray<btQuantizedBvhNode>	QuantizedNodeArray;
typedef btAlignedObjectArray<btBvhSubtreeInfo>		BvhSubtreeInfoArray;


///The btQuantizedBvh class stores an AABB tree that can be quickly traversed on the CPU and on Cell SPUs.
///It is used by btBvhTriangleMeshShape as the midphase, and by btMultiSapBroadphase.
///It is recommended to use quantization for better performance and lower memory requirements.
ATTRIBUTE_ALIGNED16(class) btQuantizedBvh
{
public:
	enum btTraversalMode
	{
		TRAVERSAL_STACKLESS = 0,
		TRAVERSAL_STACKLESS_CACHE_FRIENDLY,
		TRAVERSAL_RECURSIVE
	};
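
	// Hedged note (not in the original header): TRAVERSAL_STACKLESS walks the
	// contiguous node array linearly, using the escape indices to skip rejected
	// subtrees; TRAVERSAL_STACKLESS_CACHE_FRIENDLY additionally uses the
	// btBvhSubtreeInfo headers so each visited subtree fits in a small buffer
	// (see MAX_SUBTREE_SIZE_IN_BYTES), which suits small-memory processors such
	// as the PS3 SPU; TRAVERSAL_RECURSIVE descends the quantized tree
	// recursively. See the walk*Tree methods below.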

protected:


	btVector3			m_bvhAabbMin;
	btVector3			m_bvhAabbMax;
	btVector3			m_bvhQuantization;

	int					m_bulletVersion;	//for serialization versioning. It could also be used to detect endianness.

	int					m_curNodeIndex;
	//quantization data
	bool				m_useQuantization;


	NodeArray			m_leafNodes;
	NodeArray			m_contiguousNodes;
	QuantizedNodeArray	m_quantizedLeafNodes;
	QuantizedNodeArray	m_quantizedContiguousNodes;

	btTraversalMode	m_traversalMode;
	BvhSubtreeInfoArray		m_SubtreeHeaders;

	//This is only used for serialization so we don't have to add serialization directly to btAlignedObjectArray
	mutable int m_subtreeHeaderCount;


	///There are two versions of each accessor, one for quantized and one for regular nodes. This allows code reuse while maintaining readability (no template/macro!).
	///This might be refactored into a virtual; it is usually not calculated at run-time.
	void	setInternalNodeAabbMin(int nodeIndex, const btVector3& aabbMin)
	{
		if (m_useQuantization)
		{
			quantize(&m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0],aabbMin,0);
		} else
		{
			m_contiguousNodes[nodeIndex].m_aabbMinOrg = aabbMin;
		}
	}
	void	setInternalNodeAabbMax(int nodeIndex,const btVector3& aabbMax)
	{
		if (m_useQuantization)
		{
			quantize(&m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0],aabbMax,1);
		} else
		{
			m_contiguousNodes[nodeIndex].m_aabbMaxOrg = aabbMax;
		}
	}

	btVector3 getAabbMin(int nodeIndex) const
	{
		if (m_useQuantization)
		{
			return unQuantize(&m_quantizedLeafNodes[nodeIndex].m_quantizedAabbMin[0]);
		}
		//non-quantized
		return m_leafNodes[nodeIndex].m_aabbMinOrg;
	}
	btVector3 getAabbMax(int nodeIndex) const
	{
		if (m_useQuantization)
		{
			return unQuantize(&m_quantizedLeafNodes[nodeIndex].m_quantizedAabbMax[0]);
		}
		//non-quantized
		return m_leafNodes[nodeIndex].m_aabbMaxOrg;
	}


	void	setInternalNodeEscapeIndex(int nodeIndex, int escapeIndex)
	{
		if (m_useQuantization)
		{
			m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = -escapeIndex;
		}
		else
		{
			m_contiguousNodes[nodeIndex].m_escapeIndex = escapeIndex;
		}
	}

	void mergeInternalNodeAabb(int nodeIndex,const btVector3& newAabbMin,const btVector3& newAabbMax)
	{
		if (m_useQuantization)
		{
			unsigned short int quantizedAabbMin[3];
			unsigned short int quantizedAabbMax[3];
			quantize(quantizedAabbMin,newAabbMin,0);
			quantize(quantizedAabbMax,newAabbMax,1);
			for (int i=0;i<3;i++)
			{
				if (m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[i] > quantizedAabbMin[i])
					m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[i] = quantizedAabbMin[i];

				if (m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[i] < quantizedAabbMax[i])
					m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[i] = quantizedAabbMax[i];
			}
		} else
		{
			//non-quantized
			m_contiguousNodes[nodeIndex].m_aabbMinOrg.setMin(newAabbMin);
			m_contiguousNodes[nodeIndex].m_aabbMaxOrg.setMax(newAabbMax);
		}
	}

	void	swapLeafNodes(int firstIndex,int secondIndex);

	void	assignInternalNodeFromLeafNode(int internalNode,int leafNodeIndex);

protected:


	void	buildTree(int startIndex,int endIndex);

	int	calcSplittingAxis(int startIndex,int endIndex);

	int	sortAndCalcSplittingIndex(int startIndex,int endIndex,int splitAxis);

	void	walkStacklessTree(btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const;

	void	walkStacklessQuantizedTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex,int endNodeIndex) const;
	void	walkStacklessQuantizedTree(btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax,int startNodeIndex,int endNodeIndex) const;
	void	walkStacklessTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex,int endNodeIndex) const;

	///tree traversal designed for small-memory processors like PS3 SPU
	void	walkStacklessQuantizedTreeCacheFriendly(btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax) const;

	///use the 16-byte stackless 'skipindex' node tree to do a recursive traversal
	void	walkRecursiveQuantizedTreeAgainstQueryAabb(const btQuantizedBvhNode* currentNode,btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax) const;

	///use the 16-byte stackless 'skipindex' node tree to do a recursive traversal
	void	walkRecursiveQuantizedTreeAgainstQuantizedTree(const btQuantizedBvhNode* treeNodeA,const btQuantizedBvhNode* treeNodeB,btNodeOverlapCallback* nodeCallback) const;


	void	updateSubtreeHeaders(int leftChildNodeIndex,int rightChildNodeIndex);

public:

	BT_DECLARE_ALIGNED_ALLOCATOR();

	btQuantizedBvh();

	virtual ~btQuantizedBvh();


	///***************************************** expert/internal use only *************************
	void	setQuantizationValues(const btVector3& bvhAabbMin,const btVector3& bvhAabbMax,btScalar quantizationMargin=btScalar(1.0));
	QuantizedNodeArray&	getLeafNodeArray() { return m_quantizedLeafNodes; }
	///buildInternal is expert use only: it assumes that setQuantizationValues has been called and the leaf node array has been filled in
	void	buildInternal();
	///***************************************** expert/internal use only *************************

	void	reportAabbOverlappingNodex(btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const;
	void	reportRayOverlappingNodex (btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget) const;
	void	reportBoxCastOverlappingNodex(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin,const btVector3& aabbMax) const;

	SIMD_FORCE_INLINE void quantize(unsigned short* out, const btVector3& point,int isMax) const
	{

		btAssert(m_useQuantization);

		btAssert(point.getX() <= m_bvhAabbMax.getX());
		btAssert(point.getY() <= m_bvhAabbMax.getY());
		btAssert(point.getZ() <= m_bvhAabbMax.getZ());

		btAssert(point.getX() >= m_bvhAabbMin.getX());
		btAssert(point.getY() >= m_bvhAabbMin.getY());
		btAssert(point.getZ() >= m_bvhAabbMin.getZ());

		btVector3 v = (point - m_bvhAabbMin) * m_bvhQuantization;
		///Make sure rounding is done in a way that unQuantize(quantizeWithClamp(...)) is conservative:
		///max end-points always set the lowest bit (and min end-points clear it), so that they sort properly and neighbouring AABBs overlap properly
		///@todo: double-check this
		if (isMax)
		{
			out[0] = (unsigned short) (((unsigned short)(v.getX()+btScalar(1.)) | 1));
			out[1] = (unsigned short) (((unsigned short)(v.getY()+btScalar(1.)) | 1));
			out[2] = (unsigned short) (((unsigned short)(v.getZ()+btScalar(1.)) | 1));
		} else
		{
			out[0] = (unsigned short) (((unsigned short)(v.getX()) & 0xfffe));
			out[1] = (unsigned short) (((unsigned short)(v.getY()) & 0xfffe));
			out[2] = (unsigned short) (((unsigned short)(v.getZ()) & 0xfffe));
		}


#ifdef DEBUG_CHECK_DEQUANTIZATION
		btVector3 newPoint = unQuantize(out);
		if (isMax)
		{
			if (newPoint.getX() < point.getX())
			{
				printf("unconservative X, diffX = %f, newX=%f,oldX=%f\n",newPoint.getX()-point.getX(), newPoint.getX(),point.getX());
			}
			if (newPoint.getY() < point.getY())
			{
				printf("unconservative Y, diffY = %f, newY=%f,oldY=%f\n",newPoint.getY()-point.getY(), newPoint.getY(),point.getY());
			}
			if (newPoint.getZ() < point.getZ())
			{
				printf("unconservative Z, diffZ = %f, newZ=%f,oldZ=%f\n",newPoint.getZ()-point.getZ(), newPoint.getZ(),point.getZ());
			}
		} else
		{
			if (newPoint.getX() > point.getX())
			{
				printf("unconservative X, diffX = %f, newX=%f,oldX=%f\n",newPoint.getX()-point.getX(), newPoint.getX(),point.getX());
			}
			if (newPoint.getY() > point.getY())
			{
				printf("unconservative Y, diffY = %f, newY=%f,oldY=%f\n",newPoint.getY()-point.getY(), newPoint.getY(),point.getY());
			}
			if (newPoint.getZ() > point.getZ())
			{
				printf("unconservative Z, diffZ = %f, newZ=%f,oldZ=%f\n",newPoint.getZ()-point.getZ(), newPoint.getZ(),point.getZ());
			}
		}
#endif //DEBUG_CHECK_DEQUANTIZATION

	}
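
	// A worked example (illustrative only, not part of the original header).
	// Suppose setQuantizationValues produced m_bvhAabbMin = (0,0,0) and
	// m_bvhQuantization = (100,100,100), i.e. 100 quantized units per world unit.
	// Quantizing the x-coordinate 3.141:
	//
	//   v.getX() = (3.141 - 0) * 100 = 314.1
	//   as a min (isMax == 0): (unsigned short)(314.1) & 0xfffe    = 314 -> unQuantizes to 3.14
	//   as a max (isMax != 0): (unsigned short)(314.1 + 1.) | 1    = 315 -> unQuantizes to 3.15
	//
	// Mins round down to even values and maxes round up to odd values, so the
	// dequantized AABB always conservatively contains the original one.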


	SIMD_FORCE_INLINE void quantizeWithClamp(unsigned short* out, const btVector3& point2,int isMax) const
	{

		btAssert(m_useQuantization);

		btVector3 clampedPoint(point2);
		clampedPoint.setMax(m_bvhAabbMin);
		clampedPoint.setMin(m_bvhAabbMax);

		quantize(out,clampedPoint,isMax);

	}

	SIMD_FORCE_INLINE btVector3	unQuantize(const unsigned short* vecIn) const
	{
		btVector3	vecOut;
		vecOut.setValue(
			(btScalar)(vecIn[0]) / (m_bvhQuantization.getX()),
			(btScalar)(vecIn[1]) / (m_bvhQuantization.getY()),
			(btScalar)(vecIn[2]) / (m_bvhQuantization.getZ()));
		vecOut += m_bvhAabbMin;
		return vecOut;
	}
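
	// Round-trip note (illustrative, not in the original header): unQuantize
	// simply inverts the affine map used by quantize, so for any in-range
	// point p, unQuantize(quantize(p, 0)) <= p <= unQuantize(quantize(p, 1))
	// holds component-wise; this is exactly the conservativeness property
	// that DEBUG_CHECK_DEQUANTIZATION verifies above.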

	///setTraversalMode lets you choose between stackless, recursive or stackless cache-friendly tree traversal. Note this is only implemented for quantized trees.
	void	setTraversalMode(btTraversalMode traversalMode)
	{
		m_traversalMode = traversalMode;
	}


	SIMD_FORCE_INLINE QuantizedNodeArray&	getQuantizedNodeArray()
	{
		return m_quantizedContiguousNodes;
	}


	SIMD_FORCE_INLINE BvhSubtreeInfoArray&	getSubtreeInfoArray()
	{
		return m_SubtreeHeaders;
	}

////////////////////////////////////////////////////////////////////

	///Calculate space needed to store BVH for serialization
	unsigned calculateSerializeBufferSize() const;

	/// Data buffer MUST be 16 byte aligned
	virtual bool serialize(void *o_alignedDataBuffer, unsigned i_dataBufferSize, bool i_swapEndian) const;

	///deSerializeInPlace loads and initializes a BVH from a buffer in memory 'in place'
	static btQuantizedBvh *deSerializeInPlace(void *i_alignedDataBuffer, unsigned int i_dataBufferSize, bool i_swapEndian);

	static unsigned int getAlignmentSerializationPadding();
//////////////////////////////////////////////////////////////////////
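
	// A hedged usage sketch of the legacy serialization path above (not part
	// of the original header); exact buffer handling may differ per Bullet
	// version:
	//
	//   unsigned size = bvh->calculateSerializeBufferSize();
	//   // over-allocate so a 16-byte-aligned start can be found inside
	//   char* raw = new char[size + btQuantizedBvh::getAlignmentSerializationPadding()];
	//   char* aligned = (char*)(((uintptr_t)raw + 15) & ~(uintptr_t)15);
	//   bool ok = bvh->serialize(aligned, size, false /*i_swapEndian*/);
	//   // ... write 'aligned' to disk, map it back later, then:
	//   btQuantizedBvh* loaded = btQuantizedBvh::deSerializeInPlace(aligned, size, false);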


	virtual	int	calculateSerializeBufferSizeNew() const;

	///fills the dataBuffer and returns the struct name (or 0 on failure)
	virtual	const char*	serialize(void* dataBuffer, btSerializer* serializer) const;

	virtual	void deSerializeFloat(struct btQuantizedBvhFloatData& quantizedBvhFloatData);

	virtual	void deSerializeDouble(struct btQuantizedBvhDoubleData& quantizedBvhDoubleData);


////////////////////////////////////////////////////////////////////

	SIMD_FORCE_INLINE bool isQuantized()
	{
		return m_useQuantization;
	}

private:
	// Special "copy" constructor that allows for in-place deserialization.
	// It prevents btVector3's default constructor from being called, but doesn't initialize much else.
	// ownsMemory should most likely be false if deserializing; if you are not deserializing, don't call this (it also changes the function signature, which we need).
	btQuantizedBvh(btQuantizedBvh &other, bool ownsMemory);

}
;
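

// A minimal end-to-end sketch (illustrative, not in the original header) of
// the expert build path plus an AABB query, using only the public methods
// declared above; the leaf-filling step is schematic:
//
//   btQuantizedBvh* bvh = new btQuantizedBvh();
//   bvh->setQuantizationValues(meshAabbMin, meshAabbMax);
//   QuantizedNodeArray& leaves = bvh->getLeafNodeArray();
//   // ... append one btQuantizedBvhNode per triangle: quantize its AABB via
//   // bvh->quantize(...) and pack (partId, triangleIndex) as shown earlier ...
//   bvh->buildInternal();
//
//   CollectTriangles callback;              // from the sketch further above
//   bvh->reportAabbOverlappingNodex(&callback, queryAabbMin, queryAabbMax);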


struct	btBvhSubtreeInfoData
{
	int			m_rootNodeIndex;
	int			m_subtreeSize;
	unsigned short m_quantizedAabbMin[3];
	unsigned short m_quantizedAabbMax[3];
};

struct btOptimizedBvhNodeFloatData
{
	btVector3FloatData	m_aabbMinOrg;
	btVector3FloatData	m_aabbMaxOrg;
	int	m_escapeIndex;
	int	m_subPart;
	int	m_triangleIndex;
	char m_pad[4];
};

struct btOptimizedBvhNodeDoubleData
{
	btVector3DoubleData	m_aabbMinOrg;
	btVector3DoubleData	m_aabbMaxOrg;
	int	m_escapeIndex;
	int	m_subPart;
	int	m_triangleIndex;
	char	m_pad[4];
};


struct btQuantizedBvhNodeData
{
	unsigned short m_quantizedAabbMin[3];
	unsigned short m_quantizedAabbMax[3];
	int	m_escapeIndexOrTriangleIndex;
};

struct	btQuantizedBvhFloatData
{
	btVector3FloatData			m_bvhAabbMin;
	btVector3FloatData			m_bvhAabbMax;
	btVector3FloatData			m_bvhQuantization;
	int					m_curNodeIndex;
	int					m_useQuantization;
	int					m_numContiguousLeafNodes;
	int					m_numQuantizedContiguousNodes;
	btOptimizedBvhNodeFloatData	*m_contiguousNodesPtr;
	btQuantizedBvhNodeData		*m_quantizedContiguousNodesPtr;
	btBvhSubtreeInfoData	*m_subTreeInfoPtr;
	int					m_traversalMode;
	int					m_numSubtreeHeaders;
};

struct	btQuantizedBvhDoubleData
{
	btVector3DoubleData			m_bvhAabbMin;
	btVector3DoubleData			m_bvhAabbMax;
	btVector3DoubleData			m_bvhQuantization;
	int							m_curNodeIndex;
	int							m_useQuantization;
	int							m_numContiguousLeafNodes;
	int							m_numQuantizedContiguousNodes;
	btOptimizedBvhNodeDoubleData	*m_contiguousNodesPtr;
	btQuantizedBvhNodeData			*m_quantizedContiguousNodesPtr;

	int							m_traversalMode;
	int							m_numSubtreeHeaders;
	btBvhSubtreeInfoData		*m_subTreeInfoPtr;
};


SIMD_FORCE_INLINE	int	btQuantizedBvh::calculateSerializeBufferSizeNew() const
{
	return sizeof(btQuantizedBvhData);
}



#endif //BT_QUANTIZED_BVH_H