/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */


/* This code is in the public domain.
** Version: 1.1  Author: Walt Karas
*/

#include "hmm_intrnl.h"

void U(init)(U(descriptor) *desc)
{
    desc->avl_tree_root = 0;
    desc->last_freed = 0;
}

/* Remove a free block from a bin's doubly-linked list when it is not
** the first block in the bin.
*/
void U(dll_remove)(
    /* Pointer to pointer record in the block to be removed. */
    ptr_record *to_remove)
{
    to_remove->prev->next = to_remove->next;

    if (to_remove->next)
        to_remove->next->prev = to_remove->prev;
}

/* Put a block into the free collection of a heap.
*/
void U(into_free_collection)(
    /* Pointer to heap descriptor. */
    U(descriptor) *desc,
    /* Pointer to head record of block. */
    head_record *head_ptr)
{
    ptr_record *ptr_rec_ptr = HEAD_TO_PTR_REC(head_ptr);

    ptr_record *bin_front_ptr =
        U(avl_insert)((U(avl_avl) *) &(desc->avl_tree_root), ptr_rec_ptr);

    if (bin_front_ptr != ptr_rec_ptr)
    {
        /* The block was not inserted into the AVL tree because there is
        ** already a bin for the size of the block. */

        MARK_SUCCESSIVE_BLOCK_IN_FREE_BIN(head_ptr)
        ptr_rec_ptr->self = ptr_rec_ptr;

        /* Make the block the new second block in the bin's doubly-linked
        ** list. */
        ptr_rec_ptr->prev = bin_front_ptr;
        ptr_rec_ptr->next = bin_front_ptr->next;
        bin_front_ptr->next = ptr_rec_ptr;

        if (ptr_rec_ptr->next)
            ptr_rec_ptr->next->prev = ptr_rec_ptr;
    }
    else
        /* Block is first block in new bin. */
        ptr_rec_ptr->next = 0;
}

/* Allocate a block from a given bin.  Returns a pointer to the payload
** of the removed block.  The "last freed" pointer must be null prior
** to calling this function.
*/
void *U(alloc_from_bin)(
    /* Pointer to heap descriptor. */
    U(descriptor) *desc,
    /* Pointer to pointer record of first block in bin. */
    ptr_record *bin_front_ptr,
    /* Number of BAUs needed in the allocated block.  If the block taken
    ** from the bin is significantly larger than the number of BAUs needed,
    ** the "extra" BAUs are split off to form a new free block. */
    U(size_bau) n_baus)
{
    head_record *head_ptr;
    U(size_bau) rem_baus;

    if (bin_front_ptr->next)
    {
        /* There are multiple blocks in this bin.  Use the 2nd block in
        ** the bin to avoid needless change to the AVL tree.
        */

        ptr_record *ptr_rec_ptr = bin_front_ptr->next;
        head_ptr = PTR_REC_TO_HEAD(ptr_rec_ptr);

#ifdef HMM_AUDIT_FAIL
        AUDIT_BLOCK(head_ptr)
#endif

        U(dll_remove)(ptr_rec_ptr);
    }
    else
    {
        /* There is only one block in the bin, so it has to be removed
        ** from the AVL tree.
        */

        head_ptr = PTR_REC_TO_HEAD(bin_front_ptr);

        U(avl_remove)(
            (U(avl_avl) *) &(desc->avl_tree_root), BLOCK_BAUS(head_ptr));
    }

    MARK_BLOCK_ALLOCATED(head_ptr)

    rem_baus = BLOCK_BAUS(head_ptr) - n_baus;

    if (rem_baus >= MIN_BLOCK_BAUS)
    {
        /* Since there are enough "extra" BAUs, split them off to form
        ** a new free block.
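        **
        ** Illustrative example (numbers assumed, not taken from the
        ** source): if the bin supplies a 12-BAU block and n_baus is 8,
        ** and the 4-BAU remainder is at least MIN_BLOCK_BAUS, those
        ** trailing 4 BAUs become a new free block that is recorded as
        ** the heap's "last freed" block rather than binned immediately.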
        */

        head_record *rem_head_ptr =
            (head_record *) BAUS_FORWARD(head_ptr, n_baus);

        /* Change the next block's header to reflect the fact that the
        ** block preceding it is now smaller.
        */
        SET_PREV_BLOCK_BAUS(
            BAUS_FORWARD(head_ptr, head_ptr->block_size), rem_baus)

        head_ptr->block_size = n_baus;

        rem_head_ptr->previous_block_size = n_baus;
        rem_head_ptr->block_size = rem_baus;

        desc->last_freed = rem_head_ptr;
    }

    return(HEAD_TO_PTR_REC(head_ptr));
}

/* Take a block out of the free collection.
*/
void U(out_of_free_collection)(
    /* Descriptor of heap that block is in. */
    U(descriptor) *desc,
    /* Pointer to head of block to take out of free collection. */
    head_record *head_ptr)
{
    ptr_record *ptr_rec_ptr = HEAD_TO_PTR_REC(head_ptr);

    if (ptr_rec_ptr->self == ptr_rec_ptr)
        /* Block is not the front block in its bin, so all we have to
        ** do is take it out of the bin's doubly-linked list. */
        U(dll_remove)(ptr_rec_ptr);
    else
    {
        ptr_record *next = ptr_rec_ptr->next;

        if (next)
            /* Block is the front block in its bin, and there is at least
            ** one other block in the bin.  Substitute the next block for
            ** the front block. */
            U(avl_subst)((U(avl_avl) *) &(desc->avl_tree_root), next);
        else
            /* Block is the front block in its bin, but there is no other
            ** block in the bin.  Eliminate the bin. */
            U(avl_remove)(
                (U(avl_avl) *) &(desc->avl_tree_root), BLOCK_BAUS(head_ptr));
    }
}

void U(free)(U(descriptor) *desc, void *payload_ptr)
{
    /* Flags whether the block is coalesced with an adjacent block. */
    int coalesce;

    head_record *fwd_head_ptr;
    head_record *free_head_ptr = PTR_REC_TO_HEAD(payload_ptr);

    desc->num_baus_can_shrink = 0;

#ifdef HMM_AUDIT_FAIL

    AUDIT_BLOCK(free_head_ptr)

    /* Make sure not freeing an already free block. */
    if (!IS_BLOCK_ALLOCATED(free_head_ptr))
        HMM_AUDIT_FAIL

    if (desc->avl_tree_root)
        /* Audit root block in AVL tree. */
        AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root))

#endif

    fwd_head_ptr =
        (head_record *) BAUS_FORWARD(free_head_ptr, free_head_ptr->block_size);

    if (free_head_ptr->previous_block_size)
    {
        /* Coalesce with backward block if possible. */

        head_record *bkwd_head_ptr =
            (head_record *) BAUS_BACKWARD(
                free_head_ptr, free_head_ptr->previous_block_size);

#ifdef HMM_AUDIT_FAIL
        AUDIT_BLOCK(bkwd_head_ptr)
#endif

        if (bkwd_head_ptr == (head_record *)(desc->last_freed))
        {
            desc->last_freed = 0;
            coalesce = 1;
        }
        else if (IS_BLOCK_ALLOCATED(bkwd_head_ptr))
            coalesce = 0;
        else
        {
            U(out_of_free_collection)(desc, bkwd_head_ptr);
            coalesce = 1;
        }

        if (coalesce)
        {
            bkwd_head_ptr->block_size += free_head_ptr->block_size;
            SET_PREV_BLOCK_BAUS(fwd_head_ptr, BLOCK_BAUS(bkwd_head_ptr))
            free_head_ptr = bkwd_head_ptr;
        }
    }

    if (fwd_head_ptr->block_size == 0)
    {
        /* Block to be freed is last block before dummy end-of-chunk block. */
        desc->end_of_shrinkable_chunk =
            BAUS_FORWARD(fwd_head_ptr, DUMMY_END_BLOCK_BAUS);
        desc->num_baus_can_shrink = BLOCK_BAUS(free_head_ptr);

        if (PREV_BLOCK_BAUS(free_head_ptr) == 0)
            /* Free block is the entire chunk, so shrinking can eliminate
            ** the entire chunk, including the dummy end block.
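            **
            ** Illustrative example (numbers assumed, not taken from the
            ** source): a chunk registered via U(new_chunk) with n_baus of
            ** 100 keeps 100 - DUMMY_END_BLOCK_BAUS BAUs in its single free
            ** block, so when that block is freed as the entire chunk, the
            ** full 100 BAUs are reported as shrinkable once the dummy end
            ** block is added back in below.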
            */
            desc->num_baus_can_shrink += DUMMY_END_BLOCK_BAUS;
    }
    else
    {
        /* Coalesce with forward block if possible. */

#ifdef HMM_AUDIT_FAIL
        AUDIT_BLOCK(fwd_head_ptr)
#endif

        if (fwd_head_ptr == (head_record *)(desc->last_freed))
        {
            desc->last_freed = 0;
            coalesce = 1;
        }
        else if (IS_BLOCK_ALLOCATED(fwd_head_ptr))
            coalesce = 0;
        else
        {
            U(out_of_free_collection)(desc, fwd_head_ptr);
            coalesce = 1;
        }

        if (coalesce)
        {
            free_head_ptr->block_size += fwd_head_ptr->block_size;

            fwd_head_ptr =
                (head_record *) BAUS_FORWARD(
                    fwd_head_ptr, BLOCK_BAUS(fwd_head_ptr));

            SET_PREV_BLOCK_BAUS(fwd_head_ptr, BLOCK_BAUS(free_head_ptr))

            if (fwd_head_ptr->block_size == 0)
            {
                /* Coalesced block to be freed is last block before dummy
                ** end-of-chunk block. */
                desc->end_of_shrinkable_chunk =
                    BAUS_FORWARD(fwd_head_ptr, DUMMY_END_BLOCK_BAUS);
                desc->num_baus_can_shrink = BLOCK_BAUS(free_head_ptr);

                if (PREV_BLOCK_BAUS(free_head_ptr) == 0)
                    /* Free block is the entire chunk, so shrinking can
                    ** eliminate entire chunk including dummy end block. */
                    desc->num_baus_can_shrink += DUMMY_END_BLOCK_BAUS;
            }
        }
    }

    if (desc->last_freed)
    {
        /* There is a last freed block, but it is not adjacent to the
        ** block being freed by this call to free, so put the last
        ** freed block into the free collection.
        */

#ifdef HMM_AUDIT_FAIL
        AUDIT_BLOCK(desc->last_freed)
#endif

        U(into_free_collection)(desc, (head_record *)(desc->last_freed));
    }

    desc->last_freed = free_head_ptr;
}

void U(new_chunk)(U(descriptor) *desc, void *start, U(size_bau) n_baus)
{
#ifdef HMM_AUDIT_FAIL

    if (desc->avl_tree_root)
        /* Audit root block in AVL tree. */
        AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root))
#endif

#undef HEAD_PTR
#define HEAD_PTR ((head_record *) start)

    /* Make the chunk one big free block followed by a dummy end block.
    */

    n_baus -= DUMMY_END_BLOCK_BAUS;

    HEAD_PTR->previous_block_size = 0;
    HEAD_PTR->block_size = n_baus;

    U(into_free_collection)(desc, HEAD_PTR);

    /* Set up the dummy end block. */
    start = BAUS_FORWARD(start, n_baus);
    HEAD_PTR->previous_block_size = n_baus;
    HEAD_PTR->block_size = 0;

#undef HEAD_PTR
}

#ifdef HMM_AUDIT_FAIL

/* Function that performs the audit fail actions defined by the
** HMM_AUDIT_FAIL preprocessor symbol, and returns a dummy integer value.
*/
int U(audit_block_fail_dummy_return)(void)
{
    HMM_AUDIT_FAIL

    /* Dummy return. */
    return(0);
}

#endif

/* AVL Tree instantiation. */
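/* How the ptr_record fields double as AVL node links (see the macros
** below and U(into_free_collection) above): for the front block of a
** bin, "self" holds the less-than child, "prev" holds the greater-than
** child, and "next" links to the second block in the bin's doubly-linked
** list.  For every other block in a bin, "self" points at the record
** itself and "prev"/"next" form the list.
*/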
#ifdef HMM_AUDIT_FAIL

/* The AVL tree generic package passes an ACCESS of 1 when it "touches"
** a child node for the first time during a particular operation.  I use
** this feature to audit the free blocks that are tree nodes only once
** per operation.  Since the root node is not a child node, it has to be
** audited directly.
*/

/* The pain you feel while reading these macros will not be in vain.  It
** will remove all doubt from your mind that C++ inline functions are
** a very good thing.
*/

#define AVL_GET_LESS(H, ACCESS) \
    (((ACCESS) ? AUDIT_BLOCK_AS_EXPR(PTR_REC_TO_HEAD(H)) : 0), (H)->self)
#define AVL_GET_GREATER(H, ACCESS) \
    (((ACCESS) ? AUDIT_BLOCK_AS_EXPR(PTR_REC_TO_HEAD(H)) : 0), (H)->prev)

#else

#define AVL_GET_LESS(H, ACCESS) ((H)->self)
#define AVL_GET_GREATER(H, ACCESS) ((H)->prev)

#endif

#define AVL_SET_LESS(H, LH) (H)->self = (LH);
#define AVL_SET_GREATER(H, GH) (H)->prev = (GH);

/*  high bit of      high bit of
**  block_size       previous_block_size     balance factor
**  -----------      -------------------     --------------
**       0                    0              n/a (block allocated)
**       0                    1               1
**       1                    0              -1
**       1                    1               0
*/

#define AVL_GET_BALANCE_FACTOR(H) \
    ((((head_record *) (PTR_REC_TO_HEAD(H)))->block_size & \
      HIGH_BIT_BAU_SIZE) ? \
     (((head_record *) (PTR_REC_TO_HEAD(H)))->previous_block_size & \
      HIGH_BIT_BAU_SIZE ? 0 : -1) : 1)

#define AVL_SET_BALANCE_FACTOR(H, BF) \
{ \
    register head_record *p = \
        (head_record *) PTR_REC_TO_HEAD(H); \
    register int bal_f = (BF); \
    \
    if (bal_f <= 0) \
        p->block_size |= HIGH_BIT_BAU_SIZE; \
    else \
        p->block_size &= ~HIGH_BIT_BAU_SIZE; \
    if (bal_f >= 0) \
        p->previous_block_size |= HIGH_BIT_BAU_SIZE; \
    else \
        p->previous_block_size &= ~HIGH_BIT_BAU_SIZE; \
}

#define COMPARE_KEY_KEY(K1, K2) ((K1) == (K2) ? 0 : ((K1) > (K2) ? 1 : -1))

#define AVL_COMPARE_KEY_NODE(K, H) \
    COMPARE_KEY_KEY(K, BLOCK_BAUS(PTR_REC_TO_HEAD(H)))

#define AVL_COMPARE_NODE_NODE(H1, H2) \
    COMPARE_KEY_KEY(BLOCK_BAUS(PTR_REC_TO_HEAD(H1)), \
                    BLOCK_BAUS(PTR_REC_TO_HEAD(H2)))

#define AVL_NULL ((ptr_record *) 0)

#define AVL_IMPL_MASK \
    ( AVL_IMPL_INSERT | AVL_IMPL_SEARCH | AVL_IMPL_REMOVE | AVL_IMPL_SUBST )

#include "cavl_impl.h"