/*
 * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
 *
 * Copyright (C) 2002-2011 Aleph One Ltd.
 *   for Toby Churchill Ltd and Brightstar Engineering
 *
 * Created by Charles Manning <charles (at) aleph1.co.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include "yportenv.h"
#include "yaffs_trace.h"

#include "yaffs_guts.h"
#include "yaffs_getblockinfo.h"
#include "yaffs_tagscompat.h"
#include "yaffs_nand.h"
#include "yaffs_yaffs1.h"
#include "yaffs_yaffs2.h"
#include "yaffs_bitmap.h"
#include "yaffs_verify.h"
#include "yaffs_nand.h"
#include "yaffs_packedtags2.h"
#include "yaffs_nameval.h"
#include "yaffs_allocator.h"
#include "yaffs_attribs.h"
#include "yaffs_summary.h"

/* Note YAFFS_GC_GOOD_ENOUGH must be <= YAFFS_GC_PASSIVE_THRESHOLD */
#define YAFFS_GC_GOOD_ENOUGH 2
#define YAFFS_GC_PASSIVE_THRESHOLD 4

#include "yaffs_ecc.h"

/* Forward declarations */

static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
			     const u8 *buffer, int n_bytes, int use_reserve);

/* Function to calculate chunk and offset */

void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr,
			 int *chunk_out, u32 *offset_out)
{
	int chunk;
	u32 offset;

	chunk = (u32) (addr >> dev->chunk_shift);

	if (dev->chunk_div == 1) {
		/* easy power of 2 case */
		offset = (u32) (addr & dev->chunk_mask);
	} else {
		/* Non power-of-2 case */

		loff_t chunk_base;

		chunk /= dev->chunk_div;

		chunk_base = ((loff_t) chunk) * dev->data_bytes_per_chunk;
		offset = (u32) (addr - chunk_base);
	}

	*chunk_out = chunk;
	*offset_out = offset;
}

/* Function to return the number of shifts for a power of 2 greater than or
 * equal to the given number.
 * Note we don't try to cater for all possible numbers and this does not
 * have to be hellishly efficient.
 */

static inline u32 calc_shifts_ceiling(u32 x)
{
	int extra_bits;
	int shifts;

	shifts = extra_bits = 0;

	while (x > 1) {
		if (x & 1)
			extra_bits++;
		x >>= 1;
		shifts++;
	}

	if (extra_bits)
		shifts++;

	return shifts;
}

/* Function to return the number of shifts to get a 1 in bit 0
 */

static inline u32 calc_shifts(u32 x)
{
	u32 shifts;

	shifts = 0;

	if (!x)
		return 0;

	while (!(x & 1)) {
		x >>= 1;
		shifts++;
	}

	return shifts;
}
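/*
 * Illustrative sketch, not part of the YAFFS code: what the address
 * splitting above does for the common power-of-2 chunk size.  The
 * 2048-byte chunk size and the byte address used here are example
 * values only.
 */
#if 0
static void example_addr_to_chunk(struct yaffs_dev *dev)
{
	int chunk;
	u32 offset;

	/* With 2048-byte chunks, chunk_shift is 11 and chunk_mask is 0x7ff,
	 * so byte address 5000 maps to chunk 2 (5000 >> 11) at offset 904
	 * (5000 & 0x7ff).  Non power-of-2 chunk sizes take the chunk_div
	 * correction path instead. */
	yaffs_addr_to_chunk(dev, 5000, &chunk, &offset);
}
#endif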
/*
 * Temporary buffer manipulations.
 */

static int yaffs_init_tmp_buffers(struct yaffs_dev *dev)
{
	int i;
	u8 *buf = (u8 *) 1;

	memset(dev->temp_buffer, 0, sizeof(dev->temp_buffer));

	for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) {
		dev->temp_buffer[i].in_use = 0;
		buf = kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
		dev->temp_buffer[i].buffer = buf;
	}

	return buf ? YAFFS_OK : YAFFS_FAIL;
}

u8 *yaffs_get_temp_buffer(struct yaffs_dev *dev)
{
	int i;

	dev->temp_in_use++;
	if (dev->temp_in_use > dev->max_temp)
		dev->max_temp = dev->temp_in_use;

	for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
		if (dev->temp_buffer[i].in_use == 0) {
			dev->temp_buffer[i].in_use = 1;
			return dev->temp_buffer[i].buffer;
		}
	}

	yaffs_trace(YAFFS_TRACE_BUFFERS, "Out of temp buffers");
	/*
	 * If we got here then we have to allocate an unmanaged one
	 * This is not good.
	 */

	dev->unmanaged_buffer_allocs++;
	return kmalloc(dev->data_bytes_per_chunk, GFP_NOFS);
}

void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 *buffer)
{
	int i;

	dev->temp_in_use--;

	for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
		if (dev->temp_buffer[i].buffer == buffer) {
			dev->temp_buffer[i].in_use = 0;
			return;
		}
	}

	if (buffer) {
		/* assume it is an unmanaged one. */
		yaffs_trace(YAFFS_TRACE_BUFFERS,
			"Releasing unmanaged temp buffer");
		kfree(buffer);
		dev->unmanaged_buffer_deallocs++;
	}
}

/*
 * Determine if we have a managed buffer.
 */
int yaffs_is_managed_tmp_buffer(struct yaffs_dev *dev, const u8 *buffer)
{
	int i;

	for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
		if (dev->temp_buffer[i].buffer == buffer)
			return 1;
	}

	for (i = 0; i < dev->param.n_caches; i++) {
		if (dev->cache[i].data == buffer)
			return 1;
	}

	if (buffer == dev->checkpt_buffer)
		return 1;

	yaffs_trace(YAFFS_TRACE_ALWAYS,
		"yaffs: unmanaged buffer detected.");
	return 0;
}
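/*
 * Illustrative sketch, not part of the YAFFS code: the usual borrow/return
 * pattern for the temp buffer pool above.  Callers take one whole-chunk
 * buffer, use it, and hand it back; the pool only falls back to kmalloc
 * when all YAFFS_N_TEMP_BUFFERS are in use.
 */
#if 0
static void example_temp_buffer_use(struct yaffs_dev *dev)
{
	u8 *buffer = yaffs_get_temp_buffer(dev);

	memset(buffer, 0xff, dev->data_bytes_per_chunk);
	/* ... build or read a chunk image in buffer ... */
	yaffs_release_temp_buffer(dev, buffer);
}
#endif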
/*
 * Functions for robustisizing TODO
 *
 */

static void yaffs_handle_chunk_wr_ok(struct yaffs_dev *dev, int nand_chunk,
				     const u8 *data,
				     const struct yaffs_ext_tags *tags)
{
}

static void yaffs_handle_chunk_update(struct yaffs_dev *dev, int nand_chunk,
				      const struct yaffs_ext_tags *tags)
{
}

void yaffs_handle_chunk_error(struct yaffs_dev *dev,
			      struct yaffs_block_info *bi)
{
	if (!bi->gc_prioritise) {
		bi->gc_prioritise = 1;
		dev->has_pending_prioritised_gc = 1;
		bi->chunk_error_strikes++;

		if (bi->chunk_error_strikes > 3) {
			bi->needs_retiring = 1;	/* Too many strikes, so retire */
			yaffs_trace(YAFFS_TRACE_ALWAYS,
				"yaffs: Block struck out");
		}
	}
}

static void yaffs_handle_chunk_wr_error(struct yaffs_dev *dev, int nand_chunk,
					int erased_ok)
{
	int flash_block = nand_chunk / dev->param.chunks_per_block;
	struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);

	yaffs_handle_chunk_error(dev, bi);

	if (erased_ok) {
		/* Was an actual write failure,
		 * so mark the block for retirement.*/
		bi->needs_retiring = 1;
		yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
			"**>> Block %d needs retiring", flash_block);
	}

	/* Delete the chunk */
	yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
	yaffs_skip_rest_of_block(dev);
}

/*
 * Verification code
 */

/*
 * Simple hash function. Needs to have a reasonable spread
 */

static inline int yaffs_hash_fn(int n)
{
	if (n < 0)
		n = -n;
	return n % YAFFS_NOBJECT_BUCKETS;
}

/*
 * Access functions to useful fake objects.
 * Note that root might have a presence in NAND if permissions are set.
 */

struct yaffs_obj *yaffs_root(struct yaffs_dev *dev)
{
	return dev->root_dir;
}

struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev)
{
	return dev->lost_n_found;
}

/*
 * Erased NAND checking functions
 */

int yaffs_check_ff(u8 *buffer, int n_bytes)
{
	/* Horrible, slow implementation */
	while (n_bytes--) {
		if (*buffer != 0xff)
			return 0;
		buffer++;
	}
	return 1;
}

static int yaffs_check_chunk_erased(struct yaffs_dev *dev, int nand_chunk)
{
	int retval = YAFFS_OK;
	u8 *data = yaffs_get_temp_buffer(dev);
	struct yaffs_ext_tags tags;

	yaffs_rd_chunk_tags_nand(dev, nand_chunk, data, &tags);

	if (tags.ecc_result > YAFFS_ECC_RESULT_NO_ERROR)
		retval = YAFFS_FAIL;

	if (!yaffs_check_ff(data, dev->data_bytes_per_chunk) ||
	    tags.chunk_used) {
		yaffs_trace(YAFFS_TRACE_NANDACCESS,
			"Chunk %d not erased", nand_chunk);
		retval = YAFFS_FAIL;
	}

	yaffs_release_temp_buffer(dev, data);

	return retval;
}

static int yaffs_verify_chunk_written(struct yaffs_dev *dev,
				      int nand_chunk,
				      const u8 *data,
				      struct yaffs_ext_tags *tags)
{
	int retval = YAFFS_OK;
	struct yaffs_ext_tags temp_tags;
	u8 *buffer = yaffs_get_temp_buffer(dev);

	yaffs_rd_chunk_tags_nand(dev, nand_chunk, buffer, &temp_tags);
	if (memcmp(buffer, data, dev->data_bytes_per_chunk) ||
	    temp_tags.obj_id != tags->obj_id ||
	    temp_tags.chunk_id != tags->chunk_id ||
	    temp_tags.n_bytes != tags->n_bytes)
		retval = YAFFS_FAIL;

	yaffs_release_temp_buffer(dev, buffer);

	return retval;
}


int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks)
{
	int reserved_chunks;
	int reserved_blocks = dev->param.n_reserved_blocks;
	int checkpt_blocks;

	checkpt_blocks = yaffs_calc_checkpt_blocks_required(dev);

	reserved_chunks =
	    (reserved_blocks + checkpt_blocks) * dev->param.chunks_per_block;

	return (dev->n_free_chunks > (reserved_chunks + n_chunks));
}
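/*
 * Illustrative sketch, not part of the YAFFS code: the space reservation
 * rule enforced by yaffs_check_alloc_available() above.  With, say, 5
 * reserved blocks, 2 checkpoint blocks and 64 chunks per block (example
 * values only), a request is refused unless more than
 * (5 + 2) * 64 + n_chunks free chunks remain.
 */
#if 0
static int example_space_check(struct yaffs_dev *dev)
{
	/* Same arithmetic as yaffs_check_alloc_available(dev, 1). */
	int reserved_chunks =
	    (dev->param.n_reserved_blocks +
	     yaffs_calc_checkpt_blocks_required(dev)) *
	    dev->param.chunks_per_block;

	return dev->n_free_chunks > (reserved_chunks + 1);
}
#endif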
static int yaffs_find_alloc_block(struct yaffs_dev *dev)
{
	int i;
	struct yaffs_block_info *bi;

	if (dev->n_erased_blocks < 1) {
		/* Hoosterman we've got a problem.
		 * Can't get space to gc
		 */
		yaffs_trace(YAFFS_TRACE_ERROR,
			"yaffs tragedy: no more erased blocks");

		return -1;
	}

	/* Find an empty block. */

	for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
		dev->alloc_block_finder++;
		if (dev->alloc_block_finder < dev->internal_start_block
		    || dev->alloc_block_finder > dev->internal_end_block) {
			dev->alloc_block_finder = dev->internal_start_block;
		}

		bi = yaffs_get_block_info(dev, dev->alloc_block_finder);

		if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
			bi->block_state = YAFFS_BLOCK_STATE_ALLOCATING;
			dev->seq_number++;
			bi->seq_number = dev->seq_number;
			dev->n_erased_blocks--;
			yaffs_trace(YAFFS_TRACE_ALLOCATE,
				"Allocated block %d, seq %d, %d left",
				dev->alloc_block_finder, dev->seq_number,
				dev->n_erased_blocks);
			return dev->alloc_block_finder;
		}
	}

	yaffs_trace(YAFFS_TRACE_ALWAYS,
		"yaffs tragedy: no more erased blocks, but there should have been %d",
		dev->n_erased_blocks);

	return -1;
}

static int yaffs_alloc_chunk(struct yaffs_dev *dev, int use_reserver,
			     struct yaffs_block_info **block_ptr)
{
	int ret_val;
	struct yaffs_block_info *bi;

	if (dev->alloc_block < 0) {
		/* Get next block to allocate off */
		dev->alloc_block = yaffs_find_alloc_block(dev);
		dev->alloc_page = 0;
	}

	if (!use_reserver && !yaffs_check_alloc_available(dev, 1)) {
		/* No space unless we're allowed to use the reserve. */
		return -1;
	}

	if (dev->n_erased_blocks < dev->param.n_reserved_blocks
	    && dev->alloc_page == 0)
		yaffs_trace(YAFFS_TRACE_ALLOCATE, "Allocating reserve");

	/* Next page please.... */
	if (dev->alloc_block >= 0) {
		bi = yaffs_get_block_info(dev, dev->alloc_block);

		ret_val = (dev->alloc_block * dev->param.chunks_per_block) +
		    dev->alloc_page;
		bi->pages_in_use++;
		yaffs_set_chunk_bit(dev, dev->alloc_block, dev->alloc_page);

		dev->alloc_page++;

		dev->n_free_chunks--;

		/* If the block is full set the state to full */
		if (dev->alloc_page >= dev->param.chunks_per_block) {
			bi->block_state = YAFFS_BLOCK_STATE_FULL;
			dev->alloc_block = -1;
		}

		if (block_ptr)
			*block_ptr = bi;

		return ret_val;
	}

	yaffs_trace(YAFFS_TRACE_ERROR,
		"!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!");

	return -1;
}

static int yaffs_get_erased_chunks(struct yaffs_dev *dev)
{
	int n;

	n = dev->n_erased_blocks * dev->param.chunks_per_block;

	if (dev->alloc_block > 0)
		n += (dev->param.chunks_per_block - dev->alloc_page);

	return n;
}

/*
 * yaffs_skip_rest_of_block() skips over the rest of the allocation block
 * if we don't want to write to it.
 */
void yaffs_skip_rest_of_block(struct yaffs_dev *dev)
{
	struct yaffs_block_info *bi;

	if (dev->alloc_block > 0) {
		bi = yaffs_get_block_info(dev, dev->alloc_block);
		if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING) {
			bi->block_state = YAFFS_BLOCK_STATE_FULL;
			dev->alloc_block = -1;
		}
	}
}
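/*
 * Illustrative sketch, not part of the YAFFS code: the chunk numbering used
 * by yaffs_alloc_chunk() above.  A NAND chunk number is simply
 * block * chunks_per_block + page, so the reverse mapping is a divide and
 * a modulo, as used by yaffs_handle_chunk_wr_error() and friends.
 */
#if 0
static void example_chunk_numbering(struct yaffs_dev *dev, int nand_chunk)
{
	int block = nand_chunk / dev->param.chunks_per_block;
	int page = nand_chunk % dev->param.chunks_per_block;

	/* E.g. with 64 chunks per block, nand_chunk 130 is page 2 of
	 * block 2 (example geometry only). */
	(void) block;
	(void) page;
}
#endif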
static int yaffs_write_new_chunk(struct yaffs_dev *dev,
				 const u8 *data,
				 struct yaffs_ext_tags *tags, int use_reserver)
{
	int attempts = 0;
	int write_ok = 0;
	int chunk;

	yaffs2_checkpt_invalidate(dev);

	do {
		struct yaffs_block_info *bi = 0;
		int erased_ok = 0;

		chunk = yaffs_alloc_chunk(dev, use_reserver, &bi);
		if (chunk < 0) {
			/* no space */
			break;
		}

		/* First check this chunk is erased, if it needs
		 * checking.  The checking policy (unless forced
		 * always on) is as follows:
		 *
		 * Check the first page we try to write in a block.
		 * If the check passes then we don't need to check any
		 * more.  If the check fails, we check again...
		 * If the block has been erased, we don't need to check.
		 *
		 * However, if the block has been prioritised for gc,
		 * then we think there might be something odd about
		 * this block and stop using it.
		 *
		 * Rationale: We should only ever see chunks that have
		 * not been erased if there was a partially written
		 * chunk due to power loss.  This checking policy should
		 * catch that case with very few checks and thus save a
		 * lot of checks that are most likely not needed.
		 *
		 * Mods to the above
		 * If an erase check fails or the write fails we skip the
		 * rest of the block.
		 */

		/* let's give it a try */
		attempts++;

		if (dev->param.always_check_erased)
			bi->skip_erased_check = 0;

		if (!bi->skip_erased_check) {
			erased_ok = yaffs_check_chunk_erased(dev, chunk);
			if (erased_ok != YAFFS_OK) {
				yaffs_trace(YAFFS_TRACE_ERROR,
					"**>> yaffs chunk %d was not erased",
					chunk);

				/* If not erased, delete this one,
				 * skip rest of block and
				 * try another chunk */
				yaffs_chunk_del(dev, chunk, 1, __LINE__);
				yaffs_skip_rest_of_block(dev);
				continue;
			}
		}

		write_ok = yaffs_wr_chunk_tags_nand(dev, chunk, data, tags);

		if (!bi->skip_erased_check)
			write_ok =
			    yaffs_verify_chunk_written(dev, chunk, data, tags);

		if (write_ok != YAFFS_OK) {
			/* Clean up aborted write, skip to next block and
			 * try another chunk */
			yaffs_handle_chunk_wr_error(dev, chunk, erased_ok);
			continue;
		}

		bi->skip_erased_check = 1;

		/* Copy the data into the robustification buffer */
		yaffs_handle_chunk_wr_ok(dev, chunk, data, tags);

	} while (write_ok != YAFFS_OK &&
		 (yaffs_wr_attempts <= 0 || attempts <= yaffs_wr_attempts));

	if (!write_ok)
		chunk = -1;

	if (attempts > 1) {
		yaffs_trace(YAFFS_TRACE_ERROR,
			"**>> yaffs write required %d attempts",
			attempts);
		dev->n_retried_writes += (attempts - 1);
	}

	return chunk;
}
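/*
 * Illustrative sketch, not part of the YAFFS code: roughly how a caller
 * hands a data chunk to yaffs_write_new_chunk().  The tag fields set here
 * (obj_id, chunk_id, n_bytes) are the ones checked by
 * yaffs_verify_chunk_written() above; the real data write path fills in
 * more state than this.
 */
#if 0
static int example_write_one_chunk(struct yaffs_dev *dev,
				   struct yaffs_obj *obj,
				   int inode_chunk,
				   const u8 *data, int n_bytes)
{
	struct yaffs_ext_tags tags;

	memset(&tags, 0, sizeof(tags));
	tags.obj_id = obj->obj_id;
	tags.chunk_id = inode_chunk;
	tags.n_bytes = n_bytes;

	/* Last argument: do not dip into the reserved blocks. */
	return yaffs_write_new_chunk(dev, data, &tags, 0);
}
#endif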
/*
 * Block retiring for handling a broken block.
 */

static void yaffs_retire_block(struct yaffs_dev *dev, int flash_block)
{
	struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);

	yaffs2_checkpt_invalidate(dev);

	yaffs2_clear_oldest_dirty_seq(dev, bi);

	if (yaffs_mark_bad(dev, flash_block) != YAFFS_OK) {
		if (yaffs_erase_block(dev, flash_block) != YAFFS_OK) {
			yaffs_trace(YAFFS_TRACE_ALWAYS,
				"yaffs: Failed to mark bad and erase block %d",
				flash_block);
		} else {
			struct yaffs_ext_tags tags;
			int chunk_id =
			    flash_block * dev->param.chunks_per_block;

			u8 *buffer = yaffs_get_temp_buffer(dev);

			memset(buffer, 0xff, dev->data_bytes_per_chunk);
			memset(&tags, 0, sizeof(tags));
			tags.seq_number = YAFFS_SEQUENCE_BAD_BLOCK;
			if (dev->param.write_chunk_tags_fn(dev, chunk_id -
							   dev->chunk_offset,
							   buffer,
							   &tags) != YAFFS_OK)
				yaffs_trace(YAFFS_TRACE_ALWAYS,
					"yaffs: Failed to write bad block marker to block %d",
					flash_block);

			yaffs_release_temp_buffer(dev, buffer);
		}
	}

	bi->block_state = YAFFS_BLOCK_STATE_DEAD;
	bi->gc_prioritise = 0;
	bi->needs_retiring = 0;

	dev->n_retired_blocks++;
}

/*---------------- Name handling functions ------------*/

static u16 yaffs_calc_name_sum(const YCHAR *name)
{
	u16 sum = 0;
	u16 i = 1;

	if (!name)
		return 0;

	while ((*name) && i < (YAFFS_MAX_NAME_LENGTH / 2)) {

		/* 0x1f mask is case insensitive */
		sum += ((*name) & 0x1f) * i;
		i++;
		name++;
	}
	return sum;
}

void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR *name)
{
	memset(obj->short_name, 0, sizeof(obj->short_name));
	if (name &&
	    yaffs_strnlen(name, YAFFS_SHORT_NAME_LENGTH + 1) <=
	    YAFFS_SHORT_NAME_LENGTH)
		yaffs_strcpy(obj->short_name, name);
	else
		obj->short_name[0] = _Y('\0');
	obj->sum = yaffs_calc_name_sum(name);
}

void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
				const struct yaffs_obj_hdr *oh)
{
#ifdef CONFIG_YAFFS_AUTO_UNICODE
	YCHAR tmp_name[YAFFS_MAX_NAME_LENGTH + 1];
	memset(tmp_name, 0, sizeof(tmp_name));
	yaffs_load_name_from_oh(obj->my_dev, tmp_name, oh->name,
				YAFFS_MAX_NAME_LENGTH + 1);
	yaffs_set_obj_name(obj, tmp_name);
#else
	yaffs_set_obj_name(obj, oh->name);
#endif
}

loff_t yaffs_max_file_size(struct yaffs_dev *dev)
{
	return ((loff_t) YAFFS_MAX_CHUNK_ID) * dev->data_bytes_per_chunk;
}
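/*
 * Illustrative sketch, not part of the YAFFS code: the name checksum above
 * is case-insensitive because it keeps only the low five bits of each
 * character.  For example, "Ab" and "aB" both sum to (1 * 1) + (2 * 2) = 5,
 * so either spelling matches the same obj->sum during name lookups.
 */
#if 0
static void example_name_sum(void)
{
	u16 sum_mixed = yaffs_calc_name_sum(_Y("Ab"));
	u16 sum_other = yaffs_calc_name_sum(_Y("aB"));

	/* sum_mixed == sum_other == 5 */
	(void) sum_mixed;
	(void) sum_other;
}
#endif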
/*-------------------- TNODES -------------------
 *
 * List of spare tnodes
 * The list is hooked together using the first pointer
 * in the tnode.
 */

struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev)
{
	struct yaffs_tnode *tn = yaffs_alloc_raw_tnode(dev);

	if (tn) {
		memset(tn, 0, dev->tnode_size);
		dev->n_tnodes++;
	}

	dev->checkpoint_blocks_required = 0;	/* force recalculation */

	return tn;
}

/* FreeTnode frees up a tnode and puts it back on the free list */
static void yaffs_free_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
{
	yaffs_free_raw_tnode(dev, tn);
	dev->n_tnodes--;
	dev->checkpoint_blocks_required = 0;	/* force recalculation */
}

static void yaffs_deinit_tnodes_and_objs(struct yaffs_dev *dev)
{
	yaffs_deinit_raw_tnodes_and_objs(dev);
	dev->n_obj = 0;
	dev->n_tnodes = 0;
}

void yaffs_load_tnode_0(struct yaffs_dev *dev, struct yaffs_tnode *tn,
			unsigned pos, unsigned val)
{
	u32 *map = (u32 *) tn;
	u32 bit_in_map;
	u32 bit_in_word;
	u32 word_in_map;
	u32 mask;

	pos &= YAFFS_TNODES_LEVEL0_MASK;
	val >>= dev->chunk_grp_bits;

	bit_in_map = pos * dev->tnode_width;
	word_in_map = bit_in_map / 32;
	bit_in_word = bit_in_map & (32 - 1);

	mask = dev->tnode_mask << bit_in_word;

	map[word_in_map] &= ~mask;
	map[word_in_map] |= (mask & (val << bit_in_word));

	if (dev->tnode_width > (32 - bit_in_word)) {
		bit_in_word = (32 - bit_in_word);
		word_in_map++;
		mask = dev->tnode_mask >> bit_in_word;
		map[word_in_map] &= ~mask;
		map[word_in_map] |= (mask & (val >> bit_in_word));
	}
}

u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
			 unsigned pos)
{
	u32 *map = (u32 *) tn;
	u32 bit_in_map;
	u32 bit_in_word;
	u32 word_in_map;
	u32 val;

	pos &= YAFFS_TNODES_LEVEL0_MASK;

	bit_in_map = pos * dev->tnode_width;
	word_in_map = bit_in_map / 32;
	bit_in_word = bit_in_map & (32 - 1);

	val = map[word_in_map] >> bit_in_word;

	if (dev->tnode_width > (32 - bit_in_word)) {
		bit_in_word = (32 - bit_in_word);
		word_in_map++;
		val |= (map[word_in_map] << bit_in_word);
	}

	val &= dev->tnode_mask;
	val <<= dev->chunk_grp_bits;

	return val;
}
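/*
 * Illustrative sketch, not part of the YAFFS code: how the level 0 tnode
 * entries above are packed.  Each entry is tnode_width bits wide and may
 * straddle a 32-bit word boundary.  Assuming a 16-bit tnode_width, entry 3
 * occupies bits 48..63 of the tnode, i.e. the top half of map[1].
 */
#if 0
static void example_tnode_packing(struct yaffs_dev *dev)
{
	struct yaffs_tnode *tn = yaffs_get_tnode(dev);

	if (!tn)
		return;

	yaffs_load_tnode_0(dev, tn, 3, 0x1234);	/* store entry 3 */
	/* yaffs_get_group_base(dev, tn, 3) now yields 0x1234 again,
	 * provided chunk_grp_bits is zero and the value fits in
	 * tnode_width bits. */
	yaffs_free_tnode(dev, tn);
}
#endif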
/* ------------------- End of individual tnode manipulation -----------------*/

/* ---------Functions to manipulate the look-up tree (made up of tnodes) ------
 * The look up tree is represented by the top tnode and the number of top_level
 * in the tree. 0 means only the level 0 tnode is in the tree.
 */

/* FindLevel0Tnode finds the level 0 tnode, if one exists. */
struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
				       struct yaffs_file_var *file_struct,
				       u32 chunk_id)
{
	struct yaffs_tnode *tn = file_struct->top;
	u32 i;
	int required_depth;
	int level = file_struct->top_level;

	/* Check sane level and chunk Id */
	if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL)
		return NULL;

	if (chunk_id > YAFFS_MAX_CHUNK_ID)
		return NULL;

	/* First check we're tall enough (ie enough top_level) */

	i = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
	required_depth = 0;
	while (i) {
		i >>= YAFFS_TNODES_INTERNAL_BITS;
		required_depth++;
	}

	if (required_depth > file_struct->top_level)
		return NULL;	/* Not tall enough, so we can't find it */

	/* Traverse down to level 0 */
	while (level > 0 && tn) {
		tn = tn->internal[(chunk_id >>
			(YAFFS_TNODES_LEVEL0_BITS +
				(level - 1) *
				YAFFS_TNODES_INTERNAL_BITS)) &
			YAFFS_TNODES_INTERNAL_MASK];
		level--;
	}

	return tn;
}

/* add_find_tnode_0 finds the level 0 tnode if it exists,
 * otherwise first expands the tree.
 * This happens in two steps:
 *  1. If the tree isn't tall enough, then make it taller.
 *  2. Scan down the tree towards the level 0 tnode adding tnodes if required.
 *
 * Used when modifying the tree.
 *
 * If the tn argument is NULL, then a fresh tnode will be added otherwise the
 * specified tn will be plugged into the ttree.
 */

struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
					   struct yaffs_file_var *file_struct,
					   u32 chunk_id,
					   struct yaffs_tnode *passed_tn)
{
	int required_depth;
	int i;
	int l;
	struct yaffs_tnode *tn;
	u32 x;

	/* Check sane level and page Id */
	if (file_struct->top_level < 0 ||
	    file_struct->top_level > YAFFS_TNODES_MAX_LEVEL)
		return NULL;

	if (chunk_id > YAFFS_MAX_CHUNK_ID)
		return NULL;

	/* First check we're tall enough (ie enough top_level) */

	x = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
	required_depth = 0;
	while (x) {
		x >>= YAFFS_TNODES_INTERNAL_BITS;
		required_depth++;
	}

	if (required_depth > file_struct->top_level) {
		/* Not tall enough, gotta make the tree taller */
		for (i = file_struct->top_level; i < required_depth; i++) {

			tn = yaffs_get_tnode(dev);

			if (tn) {
				tn->internal[0] = file_struct->top;
				file_struct->top = tn;
				file_struct->top_level++;
			} else {
				yaffs_trace(YAFFS_TRACE_ERROR,
					"yaffs: no more tnodes");
				return NULL;
			}
		}
	}

	/* Traverse down to level 0, adding anything we need */

	l = file_struct->top_level;
	tn = file_struct->top;

	if (l > 0) {
		while (l > 0 && tn) {
			x = (chunk_id >>
			     (YAFFS_TNODES_LEVEL0_BITS +
			      (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) &
			    YAFFS_TNODES_INTERNAL_MASK;

			if ((l > 1) && !tn->internal[x]) {
				/* Add missing non-level-zero tnode */
				tn->internal[x] = yaffs_get_tnode(dev);
				if (!tn->internal[x])
					return NULL;
			} else if (l == 1) {
				/* Looking from level 1 at level 0 */
				if (passed_tn) {
					/* If we already have one, release it */
					if (tn->internal[x])
						yaffs_free_tnode(dev,
							tn->internal[x]);
					tn->internal[x] = passed_tn;

				} else if (!tn->internal[x]) {
					/* Don't have one, none passed in */
					tn->internal[x] = yaffs_get_tnode(dev);
					if (!tn->internal[x])
						return NULL;
				}
			}

			tn = tn->internal[x];
			l--;
		}
	} else {
		/* top is level 0 */
		if (passed_tn) {
			memcpy(tn, passed_tn,
			       (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8);
			yaffs_free_tnode(dev, passed_tn);
		}
	}

	return tn;
}

static int yaffs_tags_match(const struct yaffs_ext_tags *tags, int obj_id,
			    int chunk_obj)
{
	return (tags->chunk_id == chunk_obj &&
		tags->obj_id == obj_id &&
		!tags->is_deleted) ? 1 : 0;
}

static int yaffs_find_chunk_in_group(struct yaffs_dev *dev, int the_chunk,
				     struct yaffs_ext_tags *tags, int obj_id,
				     int inode_chunk)
{
	int j;

	for (j = 0; the_chunk && j < dev->chunk_grp_size; j++) {
		if (yaffs_check_chunk_bit
		    (dev, the_chunk / dev->param.chunks_per_block,
		     the_chunk % dev->param.chunks_per_block)) {

			if (dev->chunk_grp_size == 1)
				return the_chunk;
			else {
				yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL,
							 tags);
				if (yaffs_tags_match(tags,
						     obj_id, inode_chunk)) {
					/* found it; */
					return the_chunk;
				}
			}
		}
		the_chunk++;
	}
	return -1;
}

static int yaffs_find_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
				    struct yaffs_ext_tags *tags)
{
	/* Get the Tnode, then get the level 0 offset chunk offset */
	struct yaffs_tnode *tn;
	int the_chunk = -1;
	struct yaffs_ext_tags local_tags;
	int ret_val = -1;
	struct yaffs_dev *dev = in->my_dev;

	if (!tags) {
		/* Passed a NULL, so use our own tags space */
		tags = &local_tags;
	}

	tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);

	if (!tn)
		return ret_val;

	the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);

	ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
					    inode_chunk);
	return ret_val;
}

static int yaffs_find_del_file_chunk(struct yaffs_obj *in, int inode_chunk,
				     struct yaffs_ext_tags *tags)
{
	/* Get the Tnode, then get the level 0 offset chunk offset */
	struct yaffs_tnode *tn;
	int the_chunk = -1;
	struct yaffs_ext_tags local_tags;
	struct yaffs_dev *dev = in->my_dev;
	int ret_val = -1;

	if (!tags) {
		/* Passed a NULL, so use our own tags space */
		tags = &local_tags;
	}

	tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);

	if (!tn)
		return ret_val;

	the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);

	ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
					    inode_chunk);

	/* Delete the entry in the filestructure (if found) */
	if (ret_val != -1)
		yaffs_load_tnode_0(dev, tn, inode_chunk, 0);

	return ret_val;
}
int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
			    int nand_chunk, int in_scan)
{
	/* NB in_scan is zero unless scanning.
	 * For forward scanning, in_scan is > 0;
	 * for backward scanning in_scan is < 0
	 *
	 * nand_chunk = 0 is a dummy insert to make sure the tnodes are there.
	 */

	struct yaffs_tnode *tn;
	struct yaffs_dev *dev = in->my_dev;
	int existing_cunk;
	struct yaffs_ext_tags existing_tags;
	struct yaffs_ext_tags new_tags;
	unsigned existing_serial, new_serial;

	if (in->variant_type != YAFFS_OBJECT_TYPE_FILE) {
		/* Just ignore an attempt at putting a chunk into a non-file
		 * during scanning.
		 * If it is not during Scanning then something went wrong!
		 */
		if (!in_scan) {
			yaffs_trace(YAFFS_TRACE_ERROR,
				"yaffs tragedy:attempt to put data chunk into a non-file"
				);
			BUG();
		}

		yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
		return YAFFS_OK;
	}

	tn = yaffs_add_find_tnode_0(dev,
				    &in->variant.file_variant,
				    inode_chunk, NULL);
	if (!tn)
		return YAFFS_FAIL;

	if (!nand_chunk)
		/* Dummy insert, bail now */
		return YAFFS_OK;

	existing_cunk = yaffs_get_group_base(dev, tn, inode_chunk);

	if (in_scan != 0) {
		/* If we're scanning then we need to test for duplicates
		 * NB This does not need to be efficient since it should only
		 * happen when the power fails during a write, then only one
		 * chunk should ever be affected.
		 *
		 * Correction for YAFFS2: This could happen quite a lot and we
		 * need to think about efficiency! TODO
		 * Update: For backward scanning we don't need to re-read tags
		 * so this is quite cheap.
		 */

		if (existing_cunk > 0) {
			/* NB Right now existing chunk will not be real
			 * chunk_id if the chunk group size > 1
			 * thus we have to do a FindChunkInFile to get the
			 * real chunk id.
			 *
			 * We have a duplicate now we need to decide which
			 * one to use:
			 *
			 * Backwards scanning YAFFS2: The old one is what
			 * we use, dump the new one.
			 * YAFFS1: Get both sets of tags and compare serial
			 * numbers.
			 */

			if (in_scan > 0) {
				/* Only do this for forward scanning */
				yaffs_rd_chunk_tags_nand(dev,
							 nand_chunk,
							 NULL, &new_tags);

				/* Do a proper find */
				existing_cunk =
				    yaffs_find_chunk_in_file(in, inode_chunk,
							     &existing_tags);
			}

			if (existing_cunk <= 0) {
				/*Hoosterman - how did this happen? */

				yaffs_trace(YAFFS_TRACE_ERROR,
					"yaffs tragedy: existing chunk < 0 in scan"
					);
			}

			/* NB The deleted flags should be false, otherwise
			 * the chunks will not be loaded during a scan
			 */

			if (in_scan > 0) {
				new_serial = new_tags.serial_number;
				existing_serial = existing_tags.serial_number;
			}

			if ((in_scan > 0) &&
			    (existing_cunk <= 0 ||
			     ((existing_serial + 1) & 3) == new_serial)) {
				/* Forward scanning.
				 * Use new
				 * Delete the old one and drop through to
				 * update the tnode
				 */
				yaffs_chunk_del(dev, existing_cunk, 1,
						__LINE__);
			} else {
				/* Backward scanning or we want to use the
				 * existing one
				 * Delete the new one and return early so that
				 * the tnode isn't changed
				 */
				yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
				return YAFFS_OK;
			}
		}

	}

	if (existing_cunk == 0)
		in->n_data_chunks++;

	yaffs_load_tnode_0(dev, tn, inode_chunk, nand_chunk);

	return YAFFS_OK;
}

static void yaffs_soft_del_chunk(struct yaffs_dev *dev, int chunk)
{
	struct yaffs_block_info *the_block;
	unsigned block_no;

	yaffs_trace(YAFFS_TRACE_DELETION, "soft delete chunk %d", chunk);

	block_no = chunk / dev->param.chunks_per_block;
	the_block = yaffs_get_block_info(dev, block_no);
	if (the_block) {
		the_block->soft_del_pages++;
		dev->n_free_chunks++;
		yaffs2_update_oldest_dirty_seq(dev, block_no, the_block);
	}
}
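/*
 * Illustrative sketch, not part of the YAFFS code: the duplicate resolution
 * in yaffs_put_chunk_in_file() above for forward (YAFFS1-style) scanning.
 * Chunk serial numbers are only two bits wide, so "new is one newer than
 * old" is tested modulo 4: an existing serial of 3 followed by a new serial
 * of 0 still means the new chunk wins, because ((3 + 1) & 3) == 0.
 */
#if 0
static int example_serial_is_newer(unsigned existing_serial,
				   unsigned new_serial)
{
	return ((existing_serial + 1) & 3) == new_serial;
}
#endif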
/* SoftDeleteWorker scans backwards through the tnode tree and soft deletes all
 * the chunks in the file.
 * All soft deleting does is increment the block's softdelete count and pulls
 * the chunk out of the tnode.
 * Thus, essentially this is the same as DeleteWorker except that the chunks
 * are soft deleted.
 */

static int yaffs_soft_del_worker(struct yaffs_obj *in, struct yaffs_tnode *tn,
				 u32 level, int chunk_offset)
{
	int i;
	int the_chunk;
	int all_done = 1;
	struct yaffs_dev *dev = in->my_dev;

	if (!tn)
		return 1;

	if (level > 0) {
		for (i = YAFFS_NTNODES_INTERNAL - 1;
		     all_done && i >= 0;
		     i--) {
			if (tn->internal[i]) {
				all_done =
				    yaffs_soft_del_worker(in,
					tn->internal[i],
					level - 1,
					(chunk_offset <<
					 YAFFS_TNODES_INTERNAL_BITS)
					+ i);
				if (all_done) {
					yaffs_free_tnode(dev,
						tn->internal[i]);
					tn->internal[i] = NULL;
				} else {
					/* Can this happen? */
				}
			}
		}
		return (all_done) ? 1 : 0;
	}

	/* level 0 */
	for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) {
		the_chunk = yaffs_get_group_base(dev, tn, i);
		if (the_chunk) {
			yaffs_soft_del_chunk(dev, the_chunk);
			yaffs_load_tnode_0(dev, tn, i, 0);
		}
	}
	return 1;
}

static void yaffs_remove_obj_from_dir(struct yaffs_obj *obj)
{
	struct yaffs_dev *dev = obj->my_dev;
	struct yaffs_obj *parent;

	yaffs_verify_obj_in_dir(obj);
	parent = obj->parent;

	yaffs_verify_dir(parent);

	if (dev && dev->param.remove_obj_fn)
		dev->param.remove_obj_fn(obj);

	list_del_init(&obj->siblings);
	obj->parent = NULL;

	yaffs_verify_dir(parent);
}

void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj)
{
	if (!directory) {
		yaffs_trace(YAFFS_TRACE_ALWAYS,
			"tragedy: Trying to add an object to a null pointer directory"
			);
		BUG();
		return;
	}
	if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
		yaffs_trace(YAFFS_TRACE_ALWAYS,
			"tragedy: Trying to add an object to a non-directory"
			);
		BUG();
	}

	if (obj->siblings.prev == NULL) {
		/* Not initialised */
		BUG();
	}

	yaffs_verify_dir(directory);

	yaffs_remove_obj_from_dir(obj);

	/* Now add it */
	list_add(&obj->siblings, &directory->variant.dir_variant.children);
	obj->parent = directory;

	if (directory == obj->my_dev->unlinked_dir
	    || directory == obj->my_dev->del_dir) {
		obj->unlinked = 1;
		obj->my_dev->n_unlinked_files++;
		obj->rename_allowed = 0;
	}

	yaffs_verify_dir(directory);
	yaffs_verify_obj_in_dir(obj);
}
static int yaffs_change_obj_name(struct yaffs_obj *obj,
				 struct yaffs_obj *new_dir,
				 const YCHAR *new_name, int force, int shadows)
{
	int unlink_op;
	int del_op;
	struct yaffs_obj *existing_target;

	if (new_dir == NULL)
		new_dir = obj->parent;	/* use the old directory */

	if (new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
		yaffs_trace(YAFFS_TRACE_ALWAYS,
			"tragedy: yaffs_change_obj_name: new_dir is not a directory"
			);
		BUG();
	}

	unlink_op = (new_dir == obj->my_dev->unlinked_dir);
	del_op = (new_dir == obj->my_dev->del_dir);

	existing_target = yaffs_find_by_name(new_dir, new_name);

	/* If the object is a file going into the unlinked directory,
	 * then it is OK to just stuff it in since duplicate names are OK.
	 * Otherwise only proceed if the new name does not exist and we're
	 * putting it into a directory.
	 */
	if (!(unlink_op || del_op || force ||
	      shadows > 0 || !existing_target) ||
	    new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
		return YAFFS_FAIL;

	yaffs_set_obj_name(obj, new_name);
	obj->dirty = 1;
	yaffs_add_obj_to_dir(new_dir, obj);

	if (unlink_op)
		obj->unlinked = 1;

	/* If it is a deletion then we mark it as a shrink for gc */
	if (yaffs_update_oh(obj, new_name, 0, del_op, shadows, NULL) >= 0)
		return YAFFS_OK;

	return YAFFS_FAIL;
}

/*------------------------ Short Operations Cache ------------------------------
 * In many situations where there is no high level buffering a lot of
 * reads might be short sequential reads, and a lot of writes may be short
 * sequential writes. eg. scanning/writing a jpeg file.
 * In these cases, a short read/write cache can provide a huge performance
 * benefit with dumb-as-a-rock code.
 * In Linux, the page cache provides read buffering and the short op cache
 * provides write buffering.
 *
 * There are a small number (~10) of cache chunks per device so that we don't
 * need a very intelligent search.
 */

static int yaffs_obj_cache_dirty(struct yaffs_obj *obj)
{
	struct yaffs_dev *dev = obj->my_dev;
	int i;
	struct yaffs_cache *cache;
	int n_caches = obj->my_dev->param.n_caches;

	for (i = 0; i < n_caches; i++) {
		cache = &dev->cache[i];
		if (cache->object == obj && cache->dirty)
			return 1;
	}

	return 0;
}

static void yaffs_flush_file_cache(struct yaffs_obj *obj)
{
	struct yaffs_dev *dev = obj->my_dev;
	int lowest = -99;	/* Stop compiler whining. */
	int i;
	struct yaffs_cache *cache;
	int chunk_written = 0;
	int n_caches = obj->my_dev->param.n_caches;

	if (n_caches < 1)
		return;
	do {
		cache = NULL;

		/* Find the lowest dirty chunk for this object */
		for (i = 0; i < n_caches; i++) {
			if (dev->cache[i].object == obj &&
			    dev->cache[i].dirty) {
				if (!cache ||
				    dev->cache[i].chunk_id < lowest) {
					cache = &dev->cache[i];
					lowest = cache->chunk_id;
				}
			}
		}

		if (cache && !cache->locked) {
			/* Write it out and free it up */
			chunk_written =
			    yaffs_wr_data_obj(cache->object,
					      cache->chunk_id,
					      cache->data,
					      cache->n_bytes, 1);
			cache->dirty = 0;
			cache->object = NULL;
		}
	} while (cache && chunk_written > 0);

	if (cache)
		/* Hoosterman, disk full while writing cache out. */
		yaffs_trace(YAFFS_TRACE_ERROR,
			"yaffs tragedy: no space during cache write");
}

/*yaffs_flush_whole_cache(dev)
 *
 *
 */

void yaffs_flush_whole_cache(struct yaffs_dev *dev)
{
	struct yaffs_obj *obj;
	int n_caches = dev->param.n_caches;
	int i;

	/* Find a dirty object in the cache and flush it...
	 * until there are no further dirty objects.
	 */
	do {
		obj = NULL;
		for (i = 0; i < n_caches && !obj; i++) {
			if (dev->cache[i].object && dev->cache[i].dirty)
				obj = dev->cache[i].object;
		}
		if (obj)
			yaffs_flush_file_cache(obj);
	} while (obj);
}
/* Grab us a cache chunk for use.
 * First look for an empty one.
 * Then look for the least recently used non-dirty one.
 * Then look for the least recently used dirty one...., flush and look again.
 */
static struct yaffs_cache *yaffs_grab_chunk_worker(struct yaffs_dev *dev)
{
	int i;

	if (dev->param.n_caches > 0) {
		for (i = 0; i < dev->param.n_caches; i++) {
			if (!dev->cache[i].object)
				return &dev->cache[i];
		}
	}
	return NULL;
}

static struct yaffs_cache *yaffs_grab_chunk_cache(struct yaffs_dev *dev)
{
	struct yaffs_cache *cache;
	struct yaffs_obj *the_obj;
	int usage;
	int i;

	if (dev->param.n_caches < 1)
		return NULL;

	/* Try find a non-dirty one... */

	cache = yaffs_grab_chunk_worker(dev);

	if (!cache) {
		/* They were all dirty, find the LRU object and flush
		 * its cache, then find again.
		 * NB what's here is not very accurate,
		 * we actually flush the object with the LRU chunk.
		 */

		/* With locking we can't assume we can use entry zero,
		 * Set the_obj to a valid pointer for Coverity. */
		the_obj = dev->cache[0].object;
		usage = -1;
		cache = NULL;

		for (i = 0; i < dev->param.n_caches; i++) {
			if (dev->cache[i].object &&
			    !dev->cache[i].locked &&
			    (dev->cache[i].last_use < usage ||
			     !cache)) {
				usage = dev->cache[i].last_use;
				the_obj = dev->cache[i].object;
				cache = &dev->cache[i];
			}
		}

		if (!cache || cache->dirty) {
			/* Flush and try again */
			yaffs_flush_file_cache(the_obj);
			cache = yaffs_grab_chunk_worker(dev);
		}
	}
	return cache;
}

/* Find a cached chunk */
static struct yaffs_cache *yaffs_find_chunk_cache(const struct yaffs_obj *obj,
						  int chunk_id)
{
	struct yaffs_dev *dev = obj->my_dev;
	int i;

	if (dev->param.n_caches < 1)
		return NULL;

	for (i = 0; i < dev->param.n_caches; i++) {
		if (dev->cache[i].object == obj &&
		    dev->cache[i].chunk_id == chunk_id) {
			dev->cache_hits++;

			return &dev->cache[i];
		}
	}
	return NULL;
}

/* Mark the chunk for the least recently used algorithm */
static void yaffs_use_cache(struct yaffs_dev *dev, struct yaffs_cache *cache,
			    int is_write)
{
	int i;

	if (dev->param.n_caches < 1)
		return;

	if (dev->cache_last_use < 0 ||
	    dev->cache_last_use > 100000000) {
		/* Reset the cache usages */
		for (i = 1; i < dev->param.n_caches; i++)
			dev->cache[i].last_use = 0;

		dev->cache_last_use = 0;
	}
	dev->cache_last_use++;
	cache->last_use = dev->cache_last_use;

	if (is_write)
		cache->dirty = 1;
}

/* Invalidate a single cache page.
 * Do this when a whole page gets written,
 * ie the short cache for this page is no longer valid.
 */
static void yaffs_invalidate_chunk_cache(struct yaffs_obj *object, int chunk_id)
{
	struct yaffs_cache *cache;

	if (object->my_dev->param.n_caches > 0) {
		cache = yaffs_find_chunk_cache(object, chunk_id);

		if (cache)
			cache->object = NULL;
	}
}
/* Invalidate all the cache pages associated with this object
 * Do this whenever the file is deleted or resized.
 */
static void yaffs_invalidate_whole_cache(struct yaffs_obj *in)
{
	int i;
	struct yaffs_dev *dev = in->my_dev;

	if (dev->param.n_caches > 0) {
		/* Invalidate it. */
		for (i = 0; i < dev->param.n_caches; i++) {
			if (dev->cache[i].object == in)
				dev->cache[i].object = NULL;
		}
	}
}

static void yaffs_unhash_obj(struct yaffs_obj *obj)
{
	int bucket;
	struct yaffs_dev *dev = obj->my_dev;

	/* If it is still linked into the bucket list, free from the list */
	if (!list_empty(&obj->hash_link)) {
		list_del_init(&obj->hash_link);
		bucket = yaffs_hash_fn(obj->obj_id);
		dev->obj_bucket[bucket].count--;
	}
}

/* FreeObject frees up an Object and puts it back on the free list */
static void yaffs_free_obj(struct yaffs_obj *obj)
{
	struct yaffs_dev *dev;

	if (!obj) {
		BUG();
		return;
	}
	dev = obj->my_dev;
	yaffs_trace(YAFFS_TRACE_OS, "FreeObject %p inode %p",
		obj, obj->my_inode);
	if (obj->parent)
		BUG();
	if (!list_empty(&obj->siblings))
		BUG();

	if (obj->my_inode) {
		/* We're still hooked up to a cached inode.
		 * Don't delete now, but mark for later deletion
		 */
		obj->defered_free = 1;
		return;
	}

	yaffs_unhash_obj(obj);

	yaffs_free_raw_obj(dev, obj);
	dev->n_obj--;
	dev->checkpoint_blocks_required = 0;	/* force recalculation */
}

void yaffs_handle_defered_free(struct yaffs_obj *obj)
{
	if (obj->defered_free)
		yaffs_free_obj(obj);
}

static int yaffs_generic_obj_del(struct yaffs_obj *in)
{
	/* Invalidate the file's data in the cache, without flushing. */
	yaffs_invalidate_whole_cache(in);

	if (in->my_dev->param.is_yaffs2 && in->parent != in->my_dev->del_dir) {
		/* Move to unlinked directory so we have a deletion record */
		yaffs_change_obj_name(in, in->my_dev->del_dir, _Y("deleted"), 0,
				      0);
	}

	yaffs_remove_obj_from_dir(in);
	yaffs_chunk_del(in->my_dev, in->hdr_chunk, 1, __LINE__);
	in->hdr_chunk = 0;

	yaffs_free_obj(in);
	return YAFFS_OK;
}

static void yaffs_soft_del_file(struct yaffs_obj *obj)
{
	if (!obj->deleted ||
	    obj->variant_type != YAFFS_OBJECT_TYPE_FILE ||
	    obj->soft_del)
		return;

	if (obj->n_data_chunks <= 0) {
		/* Empty file with no duplicate object headers,
		 * just delete it immediately */
		yaffs_free_tnode(obj->my_dev, obj->variant.file_variant.top);
		obj->variant.file_variant.top = NULL;
		yaffs_trace(YAFFS_TRACE_TRACING,
			"yaffs: Deleting empty file %d",
			obj->obj_id);
		yaffs_generic_obj_del(obj);
	} else {
		yaffs_soft_del_worker(obj,
				      obj->variant.file_variant.top,
				      obj->variant.file_variant.top_level, 0);
		obj->soft_del = 1;
	}
}

/* Pruning removes any part of the file structure tree that is beyond the
 * bounds of the file (ie that does not point to chunks).
 *
 * A file should only get pruned when its size is reduced.
 *
 * Before pruning, the chunks must be pulled from the tree and the
 * level 0 tnode entries must be zeroed out.
 * Could also use this for file deletion, but that's probably better handled
 * by a special case.
 *
 * This function is recursive. For levels > 0 the function is called again on
 * any sub-tree. For level == 0 we just check if the sub-tree has data.
 * If there is no data in a subtree then it is pruned.
 */
static struct yaffs_tnode *yaffs_prune_worker(struct yaffs_dev *dev,
					      struct yaffs_tnode *tn, u32 level,
					      int del0)
{
	int i;
	int has_data;

	if (!tn)
		return tn;

	has_data = 0;

	if (level > 0) {
		for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) {
			if (tn->internal[i]) {
				tn->internal[i] =
				    yaffs_prune_worker(dev,
						       tn->internal[i],
						       level - 1,
						       (i == 0) ? del0 : 1);
			}

			if (tn->internal[i])
				has_data++;
		}
	} else {
		int tnode_size_u32 = dev->tnode_size / sizeof(u32);
		u32 *map = (u32 *) tn;

		for (i = 0; !has_data && i < tnode_size_u32; i++) {
			if (map[i])
				has_data++;
		}
	}

	if (has_data == 0 && del0) {
		/* Free and return NULL */
		yaffs_free_tnode(dev, tn);
		tn = NULL;
	}
	return tn;
}

static int yaffs_prune_tree(struct yaffs_dev *dev,
			    struct yaffs_file_var *file_struct)
{
	int i;
	int has_data;
	int done = 0;
	struct yaffs_tnode *tn;

	if (file_struct->top_level < 1)
		return YAFFS_OK;

	file_struct->top =
	    yaffs_prune_worker(dev, file_struct->top, file_struct->top_level, 0);

	/* Now we have a tree with all the non-zero branches NULL but
	 * the height is the same as it was.
	 * Let's see if we can trim internal tnodes to shorten the tree.
	 * We can do this if only the 0th element in the tnode is in use
	 * (ie all the non-zero are NULL)
	 */

	while (file_struct->top_level && !done) {
		tn = file_struct->top;

		has_data = 0;
		for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) {
			if (tn->internal[i])
				has_data++;
		}

		if (!has_data) {
			file_struct->top = tn->internal[0];
			file_struct->top_level--;
			yaffs_free_tnode(dev, tn);
		} else {
			done = 1;
		}
	}

	return YAFFS_OK;
}

/*-------------------- End of File Structure functions.-------------------*/

/* alloc_empty_obj gets us a clean Object.*/
static struct yaffs_obj *yaffs_alloc_empty_obj(struct yaffs_dev *dev)
{
	struct yaffs_obj *obj = yaffs_alloc_raw_obj(dev);

	if (!obj)
		return obj;

	dev->n_obj++;

	/* Now sweeten it up... */

	memset(obj, 0, sizeof(struct yaffs_obj));
	obj->being_created = 1;

	obj->my_dev = dev;
	obj->hdr_chunk = 0;
	obj->variant_type = YAFFS_OBJECT_TYPE_UNKNOWN;
	INIT_LIST_HEAD(&(obj->hard_links));
	INIT_LIST_HEAD(&(obj->hash_link));
	INIT_LIST_HEAD(&obj->siblings);

	/* Now make the directory sane */
	if (dev->root_dir) {
		obj->parent = dev->root_dir;
		list_add(&(obj->siblings),
			 &dev->root_dir->variant.dir_variant.children);
	}

	/* Add it to the lost and found directory.
	 * NB Can't put root or lost-n-found in lost-n-found so
	 * check if lost-n-found exists first
	 */
	if (dev->lost_n_found)
		yaffs_add_obj_to_dir(dev->lost_n_found, obj);

	obj->being_created = 0;

	dev->checkpoint_blocks_required = 0;	/* force recalculation */

	return obj;
}
static int yaffs_find_nice_bucket(struct yaffs_dev *dev)
{
	int i;
	int l = 999;
	int lowest = 999999;

	/* Search for the shortest list or one that
	 * isn't too long.
	 */

	for (i = 0; i < 10 && lowest > 4; i++) {
		dev->bucket_finder++;
		dev->bucket_finder %= YAFFS_NOBJECT_BUCKETS;
		if (dev->obj_bucket[dev->bucket_finder].count < lowest) {
			lowest = dev->obj_bucket[dev->bucket_finder].count;
			l = dev->bucket_finder;
		}
	}

	return l;
}

static int yaffs_new_obj_id(struct yaffs_dev *dev)
{
	int bucket = yaffs_find_nice_bucket(dev);
	int found = 0;
	struct list_head *i;
	u32 n = (u32) bucket;

	/* Now find an object value that has not already been taken
	 * by scanning the list.
	 */

	while (!found) {
		found = 1;
		n += YAFFS_NOBJECT_BUCKETS;
		list_for_each(i, &dev->obj_bucket[bucket].list) {
			/* If there is already one in the list */
			if (list_entry(i, struct yaffs_obj,
				       hash_link)->obj_id == n) {
				found = 0;
				break;
			}
		}
	}
	return n;
}

static void yaffs_hash_obj(struct yaffs_obj *in)
{
	int bucket = yaffs_hash_fn(in->obj_id);
	struct yaffs_dev *dev = in->my_dev;

	list_add(&in->hash_link, &dev->obj_bucket[bucket].list);
	dev->obj_bucket[bucket].count++;
}

struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number)
{
	int bucket = yaffs_hash_fn(number);
	struct list_head *i;
	struct yaffs_obj *in;

	list_for_each(i, &dev->obj_bucket[bucket].list) {
		/* Look if it is in the list */
		in = list_entry(i, struct yaffs_obj, hash_link);
		if (in->obj_id == number) {
			/* Don't show if it is defered free */
			if (in->defered_free)
				return NULL;
			return in;
		}
	}

	return NULL;
}

struct yaffs_obj *yaffs_new_obj(struct yaffs_dev *dev, int number,
				enum yaffs_obj_type type)
{
	struct yaffs_obj *the_obj = NULL;
	struct yaffs_tnode *tn = NULL;

	if (number < 0)
		number = yaffs_new_obj_id(dev);

	if (type == YAFFS_OBJECT_TYPE_FILE) {
		tn = yaffs_get_tnode(dev);
		if (!tn)
			return NULL;
	}

	the_obj = yaffs_alloc_empty_obj(dev);
	if (!the_obj) {
		if (tn)
			yaffs_free_tnode(dev, tn);
		return NULL;
	}

	the_obj->fake = 0;
	the_obj->rename_allowed = 1;
	the_obj->unlink_allowed = 1;
	the_obj->obj_id = number;
	yaffs_hash_obj(the_obj);
	the_obj->variant_type = type;
	yaffs_load_current_time(the_obj, 1, 1);

	switch (type) {
	case YAFFS_OBJECT_TYPE_FILE:
		the_obj->variant.file_variant.file_size = 0;
		the_obj->variant.file_variant.scanned_size = 0;
		the_obj->variant.file_variant.shrink_size =
		    yaffs_max_file_size(dev);
		the_obj->variant.file_variant.top_level = 0;
		the_obj->variant.file_variant.top = tn;
		break;
	case YAFFS_OBJECT_TYPE_DIRECTORY:
		INIT_LIST_HEAD(&the_obj->variant.dir_variant.children);
		INIT_LIST_HEAD(&the_obj->variant.dir_variant.dirty);
		break;
	case YAFFS_OBJECT_TYPE_SYMLINK:
	case YAFFS_OBJECT_TYPE_HARDLINK:
	case YAFFS_OBJECT_TYPE_SPECIAL:
		/* No action required */
		break;
	case YAFFS_OBJECT_TYPE_UNKNOWN:
		/* todo this should not happen */
		break;
	}
	return the_obj;
}
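/*
 * Illustrative sketch, not part of the YAFFS code: yaffs_new_obj_id() above
 * starts from the chosen bucket number and only ever adds multiples of
 * YAFFS_NOBJECT_BUCKETS, so every generated obj_id hashes straight back to
 * that bucket via yaffs_hash_fn().  That keeps the per-bucket lists short
 * without needing a separate index.
 */
#if 0
static void example_obj_id_bucket(struct yaffs_dev *dev)
{
	int id = yaffs_new_obj_id(dev);

	/* yaffs_hash_fn(id) == id % YAFFS_NOBJECT_BUCKETS, which is the
	 * bucket that yaffs_find_nice_bucket() picked when id was
	 * generated. */
	(void) id;
}
#endif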
static struct yaffs_obj *yaffs_create_fake_dir(struct yaffs_dev *dev,
					       int number, u32 mode)
{

	struct yaffs_obj *obj =
	    yaffs_new_obj(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY);

	if (!obj)
		return NULL;

	obj->fake = 1;	/* it is fake so it might not use NAND */
	obj->rename_allowed = 0;
	obj->unlink_allowed = 0;
	obj->deleted = 0;
	obj->unlinked = 0;
	obj->yst_mode = mode;
	obj->my_dev = dev;
	obj->hdr_chunk = 0;	/* Not a valid chunk. */
	return obj;
}


static void yaffs_init_tnodes_and_objs(struct yaffs_dev *dev)
{
	int i;

	dev->n_obj = 0;
	dev->n_tnodes = 0;
	yaffs_init_raw_tnodes_and_objs(dev);

	for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
		INIT_LIST_HEAD(&dev->obj_bucket[i].list);
		dev->obj_bucket[i].count = 0;
	}
}

struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
						 int number,
						 enum yaffs_obj_type type)
{
	struct yaffs_obj *the_obj = NULL;

	if (number > 0)
		the_obj = yaffs_find_by_number(dev, number);

	if (!the_obj)
		the_obj = yaffs_new_obj(dev, number, type);

	return the_obj;
}

YCHAR *yaffs_clone_str(const YCHAR *str)
{
	YCHAR *new_str = NULL;
	int len;

	if (!str)
		str = _Y("");

	len = yaffs_strnlen(str, YAFFS_MAX_ALIAS_LENGTH);
	new_str = kmalloc((len + 1) * sizeof(YCHAR), GFP_NOFS);
	if (new_str) {
		yaffs_strncpy(new_str, str, len);
		new_str[len] = 0;
	}
	return new_str;
}

/*
 * yaffs_update_parent() handles fixing a directory's mtime and ctime when a
 * new link (ie. name) is created or deleted in the directory.
 *
 * ie.
 *   create dir/a : update dir's mtime/ctime
 *   rm dir/a:   update dir's mtime/ctime
 *   modify dir/a: don't update dir's mtime/ctime
 *
 * This can be handled immediately or deferred. Deferring helps reduce the
 * number of updates when many files in a directory are changed within a
 * brief period.
 *
 * If the directory updating is deferred then yaffs_update_dirty_dirs must be
 * called periodically.
 */

static void yaffs_update_parent(struct yaffs_obj *obj)
{
	struct yaffs_dev *dev;

	if (!obj)
		return;
	dev = obj->my_dev;
	obj->dirty = 1;
	yaffs_load_current_time(obj, 0, 1);
	if (dev->param.defered_dir_update) {
		struct list_head *link = &obj->variant.dir_variant.dirty;

		if (list_empty(link)) {
			list_add(link, &dev->dirty_dirs);
			yaffs_trace(YAFFS_TRACE_BACKGROUND,
				"Added object %d to dirty directories",
				obj->obj_id);
		}

	} else {
		yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
	}
}

void yaffs_update_dirty_dirs(struct yaffs_dev *dev)
{
	struct list_head *link;
	struct yaffs_obj *obj;
	struct yaffs_dir_var *d_s;
	union yaffs_obj_var *o_v;

	yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update dirty directories");

	while (!list_empty(&dev->dirty_dirs)) {
		link = dev->dirty_dirs.next;
		list_del_init(link);

		d_s = list_entry(link, struct yaffs_dir_var, dirty);
		o_v = list_entry(d_s, union yaffs_obj_var, dir_variant);
		obj = list_entry(o_v, struct yaffs_obj, variant);

		yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update directory %d",
			obj->obj_id);

		if (obj->dirty)
			yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
	}
}
/*
 * Mknod (create) a new object.
 * equiv_obj only has meaning for a hard link;
 * alias_str only has meaning for a symlink.
 * rdev only has meaning for devices (a subset of special objects)
 */

static struct yaffs_obj *yaffs_create_obj(enum yaffs_obj_type type,
					  struct yaffs_obj *parent,
					  const YCHAR *name,
					  u32 mode,
					  u32 uid,
					  u32 gid,
					  struct yaffs_obj *equiv_obj,
					  const YCHAR *alias_str, u32 rdev)
{
	struct yaffs_obj *in;
	YCHAR *str = NULL;
	struct yaffs_dev *dev = parent->my_dev;

	/* Check if the entry exists.
	 * If it does then fail the call since we don't want a dup. */
	if (yaffs_find_by_name(parent, name))
		return NULL;

	if (type == YAFFS_OBJECT_TYPE_SYMLINK) {
		str = yaffs_clone_str(alias_str);
		if (!str)
			return NULL;
	}

	in = yaffs_new_obj(dev, -1, type);

	if (!in) {
		kfree(str);
		return NULL;
	}

	in->hdr_chunk = 0;
	in->valid = 1;
	in->variant_type = type;

	in->yst_mode = mode;

	yaffs_attribs_init(in, gid, uid, rdev);

	in->n_data_chunks = 0;

	yaffs_set_obj_name(in, name);
	in->dirty = 1;

	yaffs_add_obj_to_dir(parent, in);

	in->my_dev = parent->my_dev;

	switch (type) {
	case YAFFS_OBJECT_TYPE_SYMLINK:
		in->variant.symlink_variant.alias = str;
		break;
	case YAFFS_OBJECT_TYPE_HARDLINK:
		in->variant.hardlink_variant.equiv_obj = equiv_obj;
		in->variant.hardlink_variant.equiv_id = equiv_obj->obj_id;
		list_add(&in->hard_links, &equiv_obj->hard_links);
		break;
	case YAFFS_OBJECT_TYPE_FILE:
	case YAFFS_OBJECT_TYPE_DIRECTORY:
	case YAFFS_OBJECT_TYPE_SPECIAL:
	case YAFFS_OBJECT_TYPE_UNKNOWN:
		/* do nothing */
		break;
	}

	if (yaffs_update_oh(in, name, 0, 0, 0, NULL) < 0) {
		/* Could not create the object header, fail */
		yaffs_del_obj(in);
		in = NULL;
	}

	if (in)
		yaffs_update_parent(parent);

	return in;
}

struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
				    const YCHAR *name, u32 mode, u32 uid,
				    u32 gid)
{
	return yaffs_create_obj(YAFFS_OBJECT_TYPE_FILE, parent, name, mode,
				uid, gid, NULL, NULL, 0);
}

struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR *name,
				   u32 mode, u32 uid, u32 gid)
{
	return yaffs_create_obj(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name,
				mode, uid, gid, NULL, NULL, 0);
}

struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
				       const YCHAR *name, u32 mode, u32 uid,
				       u32 gid, u32 rdev)
{
	return yaffs_create_obj(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode,
				uid, gid, NULL, NULL, rdev);
}

struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
				       const YCHAR *name, u32 mode, u32 uid,
				       u32 gid, const YCHAR *alias)
{
	return yaffs_create_obj(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode,
				uid, gid, NULL, alias, 0);
}

/* yaffs_link_obj returns the object id of the equivalent object.*/
struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR *name,
				 struct yaffs_obj *equiv_obj)
{
	/* Get the real object in case we were fed a hard link obj */
	equiv_obj = yaffs_get_equivalent_obj(equiv_obj);

	if (yaffs_create_obj(YAFFS_OBJECT_TYPE_HARDLINK,
			     parent, name, 0, 0, 0,
			     equiv_obj, NULL, 0))
		return equiv_obj;

	return NULL;
}
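/*
 * Illustrative sketch, not part of the YAFFS code: how the creation wrappers
 * above are typically combined.  The name and mode values are examples only;
 * yaffs_link_obj() returns the equivalent object on success, so a NULL
 * result means the hard link could not be created.
 */
#if 0
static void example_create_and_link(struct yaffs_obj *dir)
{
	struct yaffs_obj *file;
	struct yaffs_obj *link;

	file = yaffs_create_file(dir, _Y("data.bin"), 0644, 0, 0);
	if (!file)
		return;

	link = yaffs_link_obj(dir, _Y("data-hardlink"), file);
	(void) link;
}
#endif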
Allocation -------------*/ 2238 2239 static void yaffs_deinit_blocks(struct yaffs_dev *dev) 2240 { 2241 if (dev->block_info_alt && dev->block_info) 2242 vfree(dev->block_info); 2243 else 2244 kfree(dev->block_info); 2245 2246 dev->block_info_alt = 0; 2247 2248 dev->block_info = NULL; 2249 2250 if (dev->chunk_bits_alt && dev->chunk_bits) 2251 vfree(dev->chunk_bits); 2252 else 2253 kfree(dev->chunk_bits); 2254 dev->chunk_bits_alt = 0; 2255 dev->chunk_bits = NULL; 2256 } 2257 2258 static int yaffs_init_blocks(struct yaffs_dev *dev) 2259 { 2260 int n_blocks = dev->internal_end_block - dev->internal_start_block + 1; 2261 2262 dev->block_info = NULL; 2263 dev->chunk_bits = NULL; 2264 dev->alloc_block = -1; /* force it to get a new one */ 2265 2266 /* If the first allocation strategy fails, thry the alternate one */ 2267 dev->block_info = 2268 kmalloc(n_blocks * sizeof(struct yaffs_block_info), GFP_NOFS); 2269 if (!dev->block_info) { 2270 dev->block_info = 2271 vmalloc(n_blocks * sizeof(struct yaffs_block_info)); 2272 dev->block_info_alt = 1; 2273 } else { 2274 dev->block_info_alt = 0; 2275 } 2276 2277 if (!dev->block_info) 2278 goto alloc_error; 2279 2280 /* Set up dynamic blockinfo stuff. Round up bytes. */ 2281 dev->chunk_bit_stride = (dev->param.chunks_per_block + 7) / 8; 2282 dev->chunk_bits = 2283 kmalloc(dev->chunk_bit_stride * n_blocks, GFP_NOFS); 2284 if (!dev->chunk_bits) { 2285 dev->chunk_bits = 2286 vmalloc(dev->chunk_bit_stride * n_blocks); 2287 dev->chunk_bits_alt = 1; 2288 } else { 2289 dev->chunk_bits_alt = 0; 2290 } 2291 if (!dev->chunk_bits) 2292 goto alloc_error; 2293 2294 2295 memset(dev->block_info, 0, n_blocks * sizeof(struct yaffs_block_info)); 2296 memset(dev->chunk_bits, 0, dev->chunk_bit_stride * n_blocks); 2297 return YAFFS_OK; 2298 2299 alloc_error: 2300 yaffs_deinit_blocks(dev); 2301 return YAFFS_FAIL; 2302 } 2303 2304 2305 void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no) 2306 { 2307 struct yaffs_block_info *bi = yaffs_get_block_info(dev, block_no); 2308 int erased_ok = 0; 2309 int i; 2310 2311 /* If the block is still healthy erase it and mark as clean. 2312 * If the block has had a data failure, then retire it. 2313 */ 2314 2315 yaffs_trace(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE, 2316 "yaffs_block_became_dirty block %d state %d %s", 2317 block_no, bi->block_state, 2318 (bi->needs_retiring) ? 
"needs retiring" : ""); 2319 2320 yaffs2_clear_oldest_dirty_seq(dev, bi); 2321 2322 bi->block_state = YAFFS_BLOCK_STATE_DIRTY; 2323 2324 /* If this is the block being garbage collected then stop gc'ing */ 2325 if (block_no == dev->gc_block) 2326 dev->gc_block = 0; 2327 2328 /* If this block is currently the best candidate for gc 2329 * then drop as a candidate */ 2330 if (block_no == dev->gc_dirtiest) { 2331 dev->gc_dirtiest = 0; 2332 dev->gc_pages_in_use = 0; 2333 } 2334 2335 if (!bi->needs_retiring) { 2336 yaffs2_checkpt_invalidate(dev); 2337 erased_ok = yaffs_erase_block(dev, block_no); 2338 if (!erased_ok) { 2339 dev->n_erase_failures++; 2340 yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS, 2341 "**>> Erasure failed %d", block_no); 2342 } 2343 } 2344 2345 /* Verify erasure if needed */ 2346 if (erased_ok && 2347 ((yaffs_trace_mask & YAFFS_TRACE_ERASE) || 2348 !yaffs_skip_verification(dev))) { 2349 for (i = 0; i < dev->param.chunks_per_block; i++) { 2350 if (!yaffs_check_chunk_erased(dev, 2351 block_no * dev->param.chunks_per_block + i)) { 2352 yaffs_trace(YAFFS_TRACE_ERROR, 2353 ">>Block %d erasure supposedly OK, but chunk %d not erased", 2354 block_no, i); 2355 } 2356 } 2357 } 2358 2359 if (!erased_ok) { 2360 /* We lost a block of free space */ 2361 dev->n_free_chunks -= dev->param.chunks_per_block; 2362 yaffs_retire_block(dev, block_no); 2363 yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS, 2364 "**>> Block %d retired", block_no); 2365 return; 2366 } 2367 2368 /* Clean it up... */ 2369 bi->block_state = YAFFS_BLOCK_STATE_EMPTY; 2370 bi->seq_number = 0; 2371 dev->n_erased_blocks++; 2372 bi->pages_in_use = 0; 2373 bi->soft_del_pages = 0; 2374 bi->has_shrink_hdr = 0; 2375 bi->skip_erased_check = 1; /* Clean, so no need to check */ 2376 bi->gc_prioritise = 0; 2377 bi->has_summary = 0; 2378 2379 yaffs_clear_chunk_bits(dev, block_no); 2380 2381 yaffs_trace(YAFFS_TRACE_ERASE, "Erased block %d", block_no); 2382 } 2383 2384 static inline int yaffs_gc_process_chunk(struct yaffs_dev *dev, 2385 struct yaffs_block_info *bi, 2386 int old_chunk, u8 *buffer) 2387 { 2388 int new_chunk; 2389 int mark_flash = 1; 2390 struct yaffs_ext_tags tags; 2391 struct yaffs_obj *object; 2392 int matching_chunk; 2393 int ret_val = YAFFS_OK; 2394 2395 memset(&tags, 0, sizeof(tags)); 2396 yaffs_rd_chunk_tags_nand(dev, old_chunk, 2397 buffer, &tags); 2398 object = yaffs_find_by_number(dev, tags.obj_id); 2399 2400 yaffs_trace(YAFFS_TRACE_GC_DETAIL, 2401 "Collecting chunk in block %d, %d %d %d ", 2402 dev->gc_chunk, tags.obj_id, 2403 tags.chunk_id, tags.n_bytes); 2404 2405 if (object && !yaffs_skip_verification(dev)) { 2406 if (tags.chunk_id == 0) 2407 matching_chunk = 2408 object->hdr_chunk; 2409 else if (object->soft_del) 2410 /* Defeat the test */ 2411 matching_chunk = old_chunk; 2412 else 2413 matching_chunk = 2414 yaffs_find_chunk_in_file 2415 (object, tags.chunk_id, 2416 NULL); 2417 2418 if (old_chunk != matching_chunk) 2419 yaffs_trace(YAFFS_TRACE_ERROR, 2420 "gc: page in gc mismatch: %d %d %d %d", 2421 old_chunk, 2422 matching_chunk, 2423 tags.obj_id, 2424 tags.chunk_id); 2425 } 2426 2427 if (!object) { 2428 yaffs_trace(YAFFS_TRACE_ERROR, 2429 "page %d in gc has no object: %d %d %d ", 2430 old_chunk, 2431 tags.obj_id, tags.chunk_id, 2432 tags.n_bytes); 2433 } 2434 2435 if (object && 2436 object->deleted && 2437 object->soft_del && tags.chunk_id != 0) { 2438 /* Data chunk in a soft deleted file, 2439 * throw it away. 
2440 * It's a soft deleted data chunk,
2441 * No need to copy this, just forget
2442 * about it and fix up the object.
2443 */
2444
2445 /* Free chunks already includes
2446 * soft-deleted chunks; however, this
2447 * chunk is soon going to be really
2448 * deleted, which will increment free
2449 * chunks. We have to decrement free
2450 * chunks so this works out properly.
2451 */
2452 dev->n_free_chunks--;
2453 bi->soft_del_pages--;
2454
2455 object->n_data_chunks--;
2456 if (object->n_data_chunks <= 0) {
2457 /* remember to clean up obj */
2458 dev->gc_cleanup_list[dev->n_clean_ups] = tags.obj_id;
2459 dev->n_clean_ups++;
2460 }
2461 mark_flash = 0;
2462 } else if (object) {
2463 /* It's either a data chunk in a live
2464 * file or an ObjectHeader, so we're
2465 * interested in it.
2466 * NB Need to keep the ObjectHeaders of
2467 * deleted files until the whole file
2468 * has been deleted.
2469 */
2470 tags.serial_number++;
2471 dev->n_gc_copies++;
2472
2473 if (tags.chunk_id == 0) {
2474 /* It is an object header.
2475 * We need to nuke the
2476 * shrinkheader flags since its
2477 * work is done.
2478 * Also need to clean up
2479 * shadowing.
2480 */
2481 struct yaffs_obj_hdr *oh;
2482 oh = (struct yaffs_obj_hdr *) buffer;
2483
2484 oh->is_shrink = 0;
2485 tags.extra_is_shrink = 0;
2486 oh->shadows_obj = 0;
2487 oh->inband_shadowed_obj_id = 0;
2488 tags.extra_shadows = 0;
2489
2490 /* Update file size */
2491 if (object->variant_type == YAFFS_OBJECT_TYPE_FILE) {
2492 yaffs_oh_size_load(oh,
2493 object->variant.file_variant.file_size);
2494 tags.extra_file_size =
2495 object->variant.file_variant.file_size;
2496 }
2497
2498 yaffs_verify_oh(object, oh, &tags, 1);
2499 new_chunk =
2500 yaffs_write_new_chunk(dev, (u8 *) oh, &tags, 1);
2501 } else {
2502 new_chunk =
2503 yaffs_write_new_chunk(dev, buffer, &tags, 1);
2504 }
2505
2506 if (new_chunk < 0) {
2507 ret_val = YAFFS_FAIL;
2508 } else {
2509
2510 /* Now fix up the Tnodes etc.
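* (Illustrative note: the header or tnode tree is repointed at the new copy
* first, and the old chunk is only deleted afterwards at the
* yaffs_chunk_del() call below, so at every moment at least one valid copy
* of the chunk exists on flash.)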
*/ 2511 2512 if (tags.chunk_id == 0) { 2513 /* It's a header */ 2514 object->hdr_chunk = new_chunk; 2515 object->serial = tags.serial_number; 2516 } else { 2517 /* It's a data chunk */ 2518 yaffs_put_chunk_in_file(object, tags.chunk_id, 2519 new_chunk, 0); 2520 } 2521 } 2522 } 2523 if (ret_val == YAFFS_OK) 2524 yaffs_chunk_del(dev, old_chunk, mark_flash, __LINE__); 2525 return ret_val; 2526 } 2527 2528 static int yaffs_gc_block(struct yaffs_dev *dev, int block, int whole_block) 2529 { 2530 int old_chunk; 2531 int ret_val = YAFFS_OK; 2532 int i; 2533 int is_checkpt_block; 2534 int max_copies; 2535 int chunks_before = yaffs_get_erased_chunks(dev); 2536 int chunks_after; 2537 struct yaffs_block_info *bi = yaffs_get_block_info(dev, block); 2538 2539 is_checkpt_block = (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT); 2540 2541 yaffs_trace(YAFFS_TRACE_TRACING, 2542 "Collecting block %d, in use %d, shrink %d, whole_block %d", 2543 block, bi->pages_in_use, bi->has_shrink_hdr, 2544 whole_block); 2545 2546 /*yaffs_verify_free_chunks(dev); */ 2547 2548 if (bi->block_state == YAFFS_BLOCK_STATE_FULL) 2549 bi->block_state = YAFFS_BLOCK_STATE_COLLECTING; 2550 2551 bi->has_shrink_hdr = 0; /* clear the flag so that the block can erase */ 2552 2553 dev->gc_disable = 1; 2554 2555 yaffs_summary_gc(dev, block); 2556 2557 if (is_checkpt_block || !yaffs_still_some_chunks(dev, block)) { 2558 yaffs_trace(YAFFS_TRACE_TRACING, 2559 "Collecting block %d that has no chunks in use", 2560 block); 2561 yaffs_block_became_dirty(dev, block); 2562 } else { 2563 2564 u8 *buffer = yaffs_get_temp_buffer(dev); 2565 2566 yaffs_verify_blk(dev, bi, block); 2567 2568 max_copies = (whole_block) ? dev->param.chunks_per_block : 5; 2569 old_chunk = block * dev->param.chunks_per_block + dev->gc_chunk; 2570 2571 for (/* init already done */ ; 2572 ret_val == YAFFS_OK && 2573 dev->gc_chunk < dev->param.chunks_per_block && 2574 (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) && 2575 max_copies > 0; 2576 dev->gc_chunk++, old_chunk++) { 2577 if (yaffs_check_chunk_bit(dev, block, dev->gc_chunk)) { 2578 /* Page is in use and might need to be copied */ 2579 max_copies--; 2580 ret_val = yaffs_gc_process_chunk(dev, bi, 2581 old_chunk, buffer); 2582 } 2583 } 2584 yaffs_release_temp_buffer(dev, buffer); 2585 } 2586 2587 yaffs_verify_collected_blk(dev, bi, block); 2588 2589 if (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) { 2590 /* 2591 * The gc did not complete. Set block state back to FULL 2592 * because checkpointing does not restore gc. 2593 */ 2594 bi->block_state = YAFFS_BLOCK_STATE_FULL; 2595 } else { 2596 /* The gc completed. 
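* (Illustrative note: gc_cleanup_list holds the object ids of soft-deleted
* files whose last remaining data chunk was reclaimed during this pass; the
* loop below frees their tnode trees and finally deletes their headers.)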
*/ 2597 /* Do any required cleanups */ 2598 for (i = 0; i < dev->n_clean_ups; i++) { 2599 /* Time to delete the file too */ 2600 struct yaffs_obj *object = 2601 yaffs_find_by_number(dev, dev->gc_cleanup_list[i]); 2602 if (object) { 2603 yaffs_free_tnode(dev, 2604 object->variant.file_variant.top); 2605 object->variant.file_variant.top = NULL; 2606 yaffs_trace(YAFFS_TRACE_GC, 2607 "yaffs: About to finally delete object %d", 2608 object->obj_id); 2609 yaffs_generic_obj_del(object); 2610 object->my_dev->n_deleted_files--; 2611 } 2612 2613 } 2614 chunks_after = yaffs_get_erased_chunks(dev); 2615 if (chunks_before >= chunks_after) 2616 yaffs_trace(YAFFS_TRACE_GC, 2617 "gc did not increase free chunks before %d after %d", 2618 chunks_before, chunks_after); 2619 dev->gc_block = 0; 2620 dev->gc_chunk = 0; 2621 dev->n_clean_ups = 0; 2622 } 2623 2624 dev->gc_disable = 0; 2625 2626 return ret_val; 2627 } 2628 2629 /* 2630 * find_gc_block() selects the dirtiest block (or close enough) 2631 * for garbage collection. 2632 */ 2633 2634 static unsigned yaffs_find_gc_block(struct yaffs_dev *dev, 2635 int aggressive, int background) 2636 { 2637 int i; 2638 int iterations; 2639 unsigned selected = 0; 2640 int prioritised = 0; 2641 int prioritised_exist = 0; 2642 struct yaffs_block_info *bi; 2643 int threshold; 2644 2645 /* First let's see if we need to grab a prioritised block */ 2646 if (dev->has_pending_prioritised_gc && !aggressive) { 2647 dev->gc_dirtiest = 0; 2648 bi = dev->block_info; 2649 for (i = dev->internal_start_block; 2650 i <= dev->internal_end_block && !selected; i++) { 2651 2652 if (bi->gc_prioritise) { 2653 prioritised_exist = 1; 2654 if (bi->block_state == YAFFS_BLOCK_STATE_FULL && 2655 yaffs_block_ok_for_gc(dev, bi)) { 2656 selected = i; 2657 prioritised = 1; 2658 } 2659 } 2660 bi++; 2661 } 2662 2663 /* 2664 * If there is a prioritised block and none was selected then 2665 * this happened because there is at least one old dirty block 2666 * gumming up the works. Let's gc the oldest dirty block. 2667 */ 2668 2669 if (prioritised_exist && 2670 !selected && dev->oldest_dirty_block > 0) 2671 selected = dev->oldest_dirty_block; 2672 2673 if (!prioritised_exist) /* None found, so we can clear this */ 2674 dev->has_pending_prioritised_gc = 0; 2675 } 2676 2677 /* If we're doing aggressive GC then we are happy to take a less-dirty 2678 * block, and search harder. 2679 * else (leasurely gc), then we only bother to do this if the 2680 * block has only a few pages in use. 2681 */ 2682 2683 if (!selected) { 2684 int pages_used; 2685 int n_blocks = 2686 dev->internal_end_block - dev->internal_start_block + 1; 2687 if (aggressive) { 2688 threshold = dev->param.chunks_per_block; 2689 iterations = n_blocks; 2690 } else { 2691 int max_threshold; 2692 2693 if (background) 2694 max_threshold = dev->param.chunks_per_block / 2; 2695 else 2696 max_threshold = dev->param.chunks_per_block / 8; 2697 2698 if (max_threshold < YAFFS_GC_PASSIVE_THRESHOLD) 2699 max_threshold = YAFFS_GC_PASSIVE_THRESHOLD; 2700 2701 threshold = background ? 
(dev->gc_not_done + 2) * 2 : 0; 2702 if (threshold < YAFFS_GC_PASSIVE_THRESHOLD) 2703 threshold = YAFFS_GC_PASSIVE_THRESHOLD; 2704 if (threshold > max_threshold) 2705 threshold = max_threshold; 2706 2707 iterations = n_blocks / 16 + 1; 2708 if (iterations > 100) 2709 iterations = 100; 2710 } 2711 2712 for (i = 0; 2713 i < iterations && 2714 (dev->gc_dirtiest < 1 || 2715 dev->gc_pages_in_use > YAFFS_GC_GOOD_ENOUGH); 2716 i++) { 2717 dev->gc_block_finder++; 2718 if (dev->gc_block_finder < dev->internal_start_block || 2719 dev->gc_block_finder > dev->internal_end_block) 2720 dev->gc_block_finder = 2721 dev->internal_start_block; 2722 2723 bi = yaffs_get_block_info(dev, dev->gc_block_finder); 2724 2725 pages_used = bi->pages_in_use - bi->soft_del_pages; 2726 2727 if (bi->block_state == YAFFS_BLOCK_STATE_FULL && 2728 pages_used < dev->param.chunks_per_block && 2729 (dev->gc_dirtiest < 1 || 2730 pages_used < dev->gc_pages_in_use) && 2731 yaffs_block_ok_for_gc(dev, bi)) { 2732 dev->gc_dirtiest = dev->gc_block_finder; 2733 dev->gc_pages_in_use = pages_used; 2734 } 2735 } 2736 2737 if (dev->gc_dirtiest > 0 && dev->gc_pages_in_use <= threshold) 2738 selected = dev->gc_dirtiest; 2739 } 2740 2741 /* 2742 * If nothing has been selected for a while, try the oldest dirty 2743 * because that's gumming up the works. 2744 */ 2745 2746 if (!selected && dev->param.is_yaffs2 && 2747 dev->gc_not_done >= (background ? 10 : 20)) { 2748 yaffs2_find_oldest_dirty_seq(dev); 2749 if (dev->oldest_dirty_block > 0) { 2750 selected = dev->oldest_dirty_block; 2751 dev->gc_dirtiest = selected; 2752 dev->oldest_dirty_gc_count++; 2753 bi = yaffs_get_block_info(dev, selected); 2754 dev->gc_pages_in_use = 2755 bi->pages_in_use - bi->soft_del_pages; 2756 } else { 2757 dev->gc_not_done = 0; 2758 } 2759 } 2760 2761 if (selected) { 2762 yaffs_trace(YAFFS_TRACE_GC, 2763 "GC Selected block %d with %d free, prioritised:%d", 2764 selected, 2765 dev->param.chunks_per_block - dev->gc_pages_in_use, 2766 prioritised); 2767 2768 dev->n_gc_blocks++; 2769 if (background) 2770 dev->bg_gcs++; 2771 2772 dev->gc_dirtiest = 0; 2773 dev->gc_pages_in_use = 0; 2774 dev->gc_not_done = 0; 2775 if (dev->refresh_skip > 0) 2776 dev->refresh_skip--; 2777 } else { 2778 dev->gc_not_done++; 2779 yaffs_trace(YAFFS_TRACE_GC, 2780 "GC none: finder %d skip %d threshold %d dirtiest %d using %d oldest %d%s", 2781 dev->gc_block_finder, dev->gc_not_done, threshold, 2782 dev->gc_dirtiest, dev->gc_pages_in_use, 2783 dev->oldest_dirty_block, background ? " bg" : ""); 2784 } 2785 2786 return selected; 2787 } 2788 2789 /* New garbage collector 2790 * If we're very low on erased blocks then we do aggressive garbage collection 2791 * otherwise we do "leasurely" garbage collection. 2792 * Aggressive gc looks further (whole array) and will accept less dirty blocks. 2793 * Passive gc only inspects smaller areas and only accepts more dirty blocks. 2794 * 2795 * The idea is to help clear out space in a more spread-out manner. 2796 * Dunno if it really does anything useful. 2797 */ 2798 static int yaffs_check_gc(struct yaffs_dev *dev, int background) 2799 { 2800 int aggressive = 0; 2801 int gc_ok = YAFFS_OK; 2802 int max_tries = 0; 2803 int min_erased; 2804 int erased_chunks; 2805 int checkpt_block_adjust; 2806 2807 if (dev->param.gc_control && (dev->param.gc_control(dev) & 1) == 0) 2808 return YAFFS_OK; 2809 2810 if (dev->gc_disable) 2811 /* Bail out so we don't get recursive gc */ 2812 return YAFFS_OK; 2813 2814 /* This loop should pass the first time. 
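* (Worked example, with assumed numbers: the aggressive/passive decision
* below hinges on min_erased = n_reserved_blocks + checkpt_block_adjust + 1;
* with, say, 5 reserved blocks and 2 checkpoint blocks that gives 8, so gc
* turns aggressive once fewer than 8 erased blocks remain.)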
2815 * Only loops here if the collection does not increase space. 2816 */ 2817 2818 do { 2819 max_tries++; 2820 2821 checkpt_block_adjust = yaffs_calc_checkpt_blocks_required(dev); 2822 2823 min_erased = 2824 dev->param.n_reserved_blocks + checkpt_block_adjust + 1; 2825 erased_chunks = 2826 dev->n_erased_blocks * dev->param.chunks_per_block; 2827 2828 /* If we need a block soon then do aggressive gc. */ 2829 if (dev->n_erased_blocks < min_erased) 2830 aggressive = 1; 2831 else { 2832 if (!background 2833 && erased_chunks > (dev->n_free_chunks / 4)) 2834 break; 2835 2836 if (dev->gc_skip > 20) 2837 dev->gc_skip = 20; 2838 if (erased_chunks < dev->n_free_chunks / 2 || 2839 dev->gc_skip < 1 || background) 2840 aggressive = 0; 2841 else { 2842 dev->gc_skip--; 2843 break; 2844 } 2845 } 2846 2847 dev->gc_skip = 5; 2848 2849 /* If we don't already have a block being gc'd then see if we 2850 * should start another */ 2851 2852 if (dev->gc_block < 1 && !aggressive) { 2853 dev->gc_block = yaffs2_find_refresh_block(dev); 2854 dev->gc_chunk = 0; 2855 dev->n_clean_ups = 0; 2856 } 2857 if (dev->gc_block < 1) { 2858 dev->gc_block = 2859 yaffs_find_gc_block(dev, aggressive, background); 2860 dev->gc_chunk = 0; 2861 dev->n_clean_ups = 0; 2862 } 2863 2864 if (dev->gc_block > 0) { 2865 dev->all_gcs++; 2866 if (!aggressive) 2867 dev->passive_gc_count++; 2868 2869 yaffs_trace(YAFFS_TRACE_GC, 2870 "yaffs: GC n_erased_blocks %d aggressive %d", 2871 dev->n_erased_blocks, aggressive); 2872 2873 gc_ok = yaffs_gc_block(dev, dev->gc_block, aggressive); 2874 } 2875 2876 if (dev->n_erased_blocks < (dev->param.n_reserved_blocks) && 2877 dev->gc_block > 0) { 2878 yaffs_trace(YAFFS_TRACE_GC, 2879 "yaffs: GC !!!no reclaim!!! n_erased_blocks %d after try %d block %d", 2880 dev->n_erased_blocks, max_tries, 2881 dev->gc_block); 2882 } 2883 } while ((dev->n_erased_blocks < dev->param.n_reserved_blocks) && 2884 (dev->gc_block > 0) && (max_tries < 2)); 2885 2886 return aggressive ? gc_ok : YAFFS_OK; 2887 } 2888 2889 /* 2890 * yaffs_bg_gc() 2891 * Garbage collects. Intended to be called from a background thread. 2892 * Returns non-zero if at least half the free chunks are erased. 
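* (Illustrative usage sketch for a hypothetical caller, not the actual yaffs
* background thread: a loop might call this periodically and sleep longer
* once enough space is erased, e.g.
*
*     while (!kthread_should_stop()) {
*             int done = yaffs_bg_gc(dev, urgency);
*             msleep(done ? 2000 : 500);
*     }
*
* where urgency and the sleep intervals are made-up placeholder values.)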
2893 */ 2894 int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency) 2895 { 2896 int erased_chunks = dev->n_erased_blocks * dev->param.chunks_per_block; 2897 2898 yaffs_trace(YAFFS_TRACE_BACKGROUND, "Background gc %u", urgency); 2899 2900 yaffs_check_gc(dev, 1); 2901 return erased_chunks > dev->n_free_chunks / 2; 2902 } 2903 2904 /*-------------------- Data file manipulation -----------------*/ 2905 2906 static int yaffs_rd_data_obj(struct yaffs_obj *in, int inode_chunk, u8 * buffer) 2907 { 2908 int nand_chunk = yaffs_find_chunk_in_file(in, inode_chunk, NULL); 2909 2910 if (nand_chunk >= 0) 2911 return yaffs_rd_chunk_tags_nand(in->my_dev, nand_chunk, 2912 buffer, NULL); 2913 else { 2914 yaffs_trace(YAFFS_TRACE_NANDACCESS, 2915 "Chunk %d not found zero instead", 2916 nand_chunk); 2917 /* get sane (zero) data if you read a hole */ 2918 memset(buffer, 0, in->my_dev->data_bytes_per_chunk); 2919 return 0; 2920 } 2921 2922 } 2923 2924 void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash, 2925 int lyn) 2926 { 2927 int block; 2928 int page; 2929 struct yaffs_ext_tags tags; 2930 struct yaffs_block_info *bi; 2931 2932 if (chunk_id <= 0) 2933 return; 2934 2935 dev->n_deletions++; 2936 block = chunk_id / dev->param.chunks_per_block; 2937 page = chunk_id % dev->param.chunks_per_block; 2938 2939 if (!yaffs_check_chunk_bit(dev, block, page)) 2940 yaffs_trace(YAFFS_TRACE_VERIFY, 2941 "Deleting invalid chunk %d", chunk_id); 2942 2943 bi = yaffs_get_block_info(dev, block); 2944 2945 yaffs2_update_oldest_dirty_seq(dev, block, bi); 2946 2947 yaffs_trace(YAFFS_TRACE_DELETION, 2948 "line %d delete of chunk %d", 2949 lyn, chunk_id); 2950 2951 if (!dev->param.is_yaffs2 && mark_flash && 2952 bi->block_state != YAFFS_BLOCK_STATE_COLLECTING) { 2953 2954 memset(&tags, 0, sizeof(tags)); 2955 tags.is_deleted = 1; 2956 yaffs_wr_chunk_tags_nand(dev, chunk_id, NULL, &tags); 2957 yaffs_handle_chunk_update(dev, chunk_id, &tags); 2958 } else { 2959 dev->n_unmarked_deletions++; 2960 } 2961 2962 /* Pull out of the management area. 2963 * If the whole block became dirty, this will kick off an erasure. 2964 */ 2965 if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING || 2966 bi->block_state == YAFFS_BLOCK_STATE_FULL || 2967 bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN || 2968 bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) { 2969 dev->n_free_chunks++; 2970 yaffs_clear_chunk_bit(dev, block, page); 2971 bi->pages_in_use--; 2972 2973 if (bi->pages_in_use == 0 && 2974 !bi->has_shrink_hdr && 2975 bi->block_state != YAFFS_BLOCK_STATE_ALLOCATING && 2976 bi->block_state != YAFFS_BLOCK_STATE_NEEDS_SCAN) { 2977 yaffs_block_became_dirty(dev, block); 2978 } 2979 } 2980 } 2981 2982 static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk, 2983 const u8 *buffer, int n_bytes, int use_reserve) 2984 { 2985 /* Find old chunk Need to do this to get serial number 2986 * Write new one and patch into tree. 2987 * Invalidate old tags. 2988 */ 2989 2990 int prev_chunk_id; 2991 struct yaffs_ext_tags prev_tags; 2992 int new_chunk_id; 2993 struct yaffs_ext_tags new_tags; 2994 struct yaffs_dev *dev = in->my_dev; 2995 2996 yaffs_check_gc(dev, 0); 2997 2998 /* Get the previous chunk at this location in the file if it exists. 2999 * If it does not exist then put a zero into the tree. This creates 3000 * the tnode now, rather than later when it is harder to clean up. 
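* (Illustrative note: the previous chunk is also looked up to obtain its
* serial number; new_tags.serial_number below becomes the previous serial
* number + 1 (or 1 if there was none), so that, if power fails after the new
* copy is written but before the old one is deleted, a later scan can tell
* which copy of the same obj_id/chunk_id pair is current.)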
3001 */ 3002 prev_chunk_id = yaffs_find_chunk_in_file(in, inode_chunk, &prev_tags); 3003 if (prev_chunk_id < 1 && 3004 !yaffs_put_chunk_in_file(in, inode_chunk, 0, 0)) 3005 return 0; 3006 3007 /* Set up new tags */ 3008 memset(&new_tags, 0, sizeof(new_tags)); 3009 3010 new_tags.chunk_id = inode_chunk; 3011 new_tags.obj_id = in->obj_id; 3012 new_tags.serial_number = 3013 (prev_chunk_id > 0) ? prev_tags.serial_number + 1 : 1; 3014 new_tags.n_bytes = n_bytes; 3015 3016 if (n_bytes < 1 || n_bytes > dev->param.total_bytes_per_chunk) { 3017 yaffs_trace(YAFFS_TRACE_ERROR, 3018 "Writing %d bytes to chunk!!!!!!!!!", 3019 n_bytes); 3020 BUG(); 3021 } 3022 3023 new_chunk_id = 3024 yaffs_write_new_chunk(dev, buffer, &new_tags, use_reserve); 3025 3026 if (new_chunk_id > 0) { 3027 yaffs_put_chunk_in_file(in, inode_chunk, new_chunk_id, 0); 3028 3029 if (prev_chunk_id > 0) 3030 yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__); 3031 3032 yaffs_verify_file_sane(in); 3033 } 3034 return new_chunk_id; 3035 3036 } 3037 3038 3039 3040 static int yaffs_do_xattrib_mod(struct yaffs_obj *obj, int set, 3041 const YCHAR *name, const void *value, int size, 3042 int flags) 3043 { 3044 struct yaffs_xattr_mod xmod; 3045 int result; 3046 3047 xmod.set = set; 3048 xmod.name = name; 3049 xmod.data = value; 3050 xmod.size = size; 3051 xmod.flags = flags; 3052 xmod.result = -ENOSPC; 3053 3054 result = yaffs_update_oh(obj, NULL, 0, 0, 0, &xmod); 3055 3056 if (result > 0) 3057 return xmod.result; 3058 else 3059 return -ENOSPC; 3060 } 3061 3062 static int yaffs_apply_xattrib_mod(struct yaffs_obj *obj, char *buffer, 3063 struct yaffs_xattr_mod *xmod) 3064 { 3065 int retval = 0; 3066 int x_offs = sizeof(struct yaffs_obj_hdr); 3067 struct yaffs_dev *dev = obj->my_dev; 3068 int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr); 3069 char *x_buffer = buffer + x_offs; 3070 3071 if (xmod->set) 3072 retval = 3073 nval_set(x_buffer, x_size, xmod->name, xmod->data, 3074 xmod->size, xmod->flags); 3075 else 3076 retval = nval_del(x_buffer, x_size, xmod->name); 3077 3078 obj->has_xattr = nval_hasvalues(x_buffer, x_size); 3079 obj->xattr_known = 1; 3080 xmod->result = retval; 3081 3082 return retval; 3083 } 3084 3085 static int yaffs_do_xattrib_fetch(struct yaffs_obj *obj, const YCHAR *name, 3086 void *value, int size) 3087 { 3088 char *buffer = NULL; 3089 int result; 3090 struct yaffs_ext_tags tags; 3091 struct yaffs_dev *dev = obj->my_dev; 3092 int x_offs = sizeof(struct yaffs_obj_hdr); 3093 int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr); 3094 char *x_buffer; 3095 int retval = 0; 3096 3097 if (obj->hdr_chunk < 1) 3098 return -ENODATA; 3099 3100 /* If we know that the object has no xattribs then don't do all the 3101 * reading and parsing. 
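* (Illustrative note: extended attributes are stored as packed name/value
* pairs immediately after the struct yaffs_obj_hdr within the object's
* header chunk, which is why x_offs and x_size above are derived from
* sizeof(struct yaffs_obj_hdr) and data_bytes_per_chunk.)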
3102 */ 3103 if (obj->xattr_known && !obj->has_xattr) { 3104 if (name) 3105 return -ENODATA; 3106 else 3107 return 0; 3108 } 3109 3110 buffer = (char *)yaffs_get_temp_buffer(dev); 3111 if (!buffer) 3112 return -ENOMEM; 3113 3114 result = 3115 yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, (u8 *) buffer, &tags); 3116 3117 if (result != YAFFS_OK) 3118 retval = -ENOENT; 3119 else { 3120 x_buffer = buffer + x_offs; 3121 3122 if (!obj->xattr_known) { 3123 obj->has_xattr = nval_hasvalues(x_buffer, x_size); 3124 obj->xattr_known = 1; 3125 } 3126 3127 if (name) 3128 retval = nval_get(x_buffer, x_size, name, value, size); 3129 else 3130 retval = nval_list(x_buffer, x_size, value, size); 3131 } 3132 yaffs_release_temp_buffer(dev, (u8 *) buffer); 3133 return retval; 3134 } 3135 3136 int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR * name, 3137 const void *value, int size, int flags) 3138 { 3139 return yaffs_do_xattrib_mod(obj, 1, name, value, size, flags); 3140 } 3141 3142 int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR * name) 3143 { 3144 return yaffs_do_xattrib_mod(obj, 0, name, NULL, 0, 0); 3145 } 3146 3147 int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR * name, void *value, 3148 int size) 3149 { 3150 return yaffs_do_xattrib_fetch(obj, name, value, size); 3151 } 3152 3153 int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size) 3154 { 3155 return yaffs_do_xattrib_fetch(obj, NULL, buffer, size); 3156 } 3157 3158 static void yaffs_check_obj_details_loaded(struct yaffs_obj *in) 3159 { 3160 u8 *buf; 3161 struct yaffs_obj_hdr *oh; 3162 struct yaffs_dev *dev; 3163 struct yaffs_ext_tags tags; 3164 3165 if (!in || !in->lazy_loaded || in->hdr_chunk < 1) 3166 return; 3167 3168 dev = in->my_dev; 3169 in->lazy_loaded = 0; 3170 buf = yaffs_get_temp_buffer(dev); 3171 3172 yaffs_rd_chunk_tags_nand(dev, in->hdr_chunk, buf, &tags); 3173 oh = (struct yaffs_obj_hdr *)buf; 3174 3175 in->yst_mode = oh->yst_mode; 3176 yaffs_load_attribs(in, oh); 3177 yaffs_set_obj_name_from_oh(in, oh); 3178 3179 if (in->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) { 3180 in->variant.symlink_variant.alias = 3181 yaffs_clone_str(oh->alias); 3182 } 3183 yaffs_release_temp_buffer(dev, buf); 3184 } 3185 3186 static void yaffs_load_name_from_oh(struct yaffs_dev *dev, YCHAR *name, 3187 const YCHAR *oh_name, int buff_size) 3188 { 3189 #ifdef CONFIG_YAFFS_AUTO_UNICODE 3190 if (dev->param.auto_unicode) { 3191 if (*oh_name) { 3192 /* It is an ASCII name, do an ASCII to 3193 * unicode conversion */ 3194 const char *ascii_oh_name = (const char *)oh_name; 3195 int n = buff_size - 1; 3196 while (n > 0 && *ascii_oh_name) { 3197 *name = *ascii_oh_name; 3198 name++; 3199 ascii_oh_name++; 3200 n--; 3201 } 3202 } else { 3203 yaffs_strncpy(name, oh_name + 1, buff_size - 1); 3204 } 3205 3206 return; 3207 } 3208 #endif 3209 3210 yaffs_strncpy(name, oh_name, buff_size - 1); 3211 } 3212 3213 static void yaffs_load_oh_from_name(struct yaffs_dev *dev, YCHAR *oh_name, 3214 const YCHAR *name) 3215 { 3216 #ifdef CONFIG_YAFFS_AUTO_UNICODE 3217 int is_ascii; 3218 YCHAR *w; 3219 3220 if (dev->param.auto_unicode) { 3221 3222 is_ascii = 1; 3223 w = name; 3224 3225 /* Figure out if the name will fit in ascii character set */ 3226 while (is_ascii && *w) { 3227 if ((*w) & 0xff00) 3228 is_ascii = 0; 3229 w++; 3230 } 3231 3232 if (is_ascii) { 3233 /* It is an ASCII name, so convert unicode to ascii */ 3234 char *ascii_oh_name = (char *)oh_name; 3235 int n = YAFFS_MAX_NAME_LENGTH - 1; 3236 while (n > 0 && *name) { 3237 *ascii_oh_name = 
*name; 3238 name++; 3239 ascii_oh_name++; 3240 n--; 3241 } 3242 } else { 3243 /* Unicode name, so save starting at the second YCHAR */ 3244 *oh_name = 0; 3245 yaffs_strncpy(oh_name + 1, name, YAFFS_MAX_NAME_LENGTH - 2); 3246 } 3247 3248 return; 3249 } 3250 #endif 3251 3252 yaffs_strncpy(oh_name, name, YAFFS_MAX_NAME_LENGTH - 1); 3253 } 3254 3255 /* UpdateObjectHeader updates the header on NAND for an object. 3256 * If name is not NULL, then that new name is used. 3257 */ 3258 int yaffs_update_oh(struct yaffs_obj *in, const YCHAR *name, int force, 3259 int is_shrink, int shadows, struct yaffs_xattr_mod *xmod) 3260 { 3261 3262 struct yaffs_block_info *bi; 3263 struct yaffs_dev *dev = in->my_dev; 3264 int prev_chunk_id; 3265 int ret_val = 0; 3266 int new_chunk_id; 3267 struct yaffs_ext_tags new_tags; 3268 struct yaffs_ext_tags old_tags; 3269 const YCHAR *alias = NULL; 3270 u8 *buffer = NULL; 3271 YCHAR old_name[YAFFS_MAX_NAME_LENGTH + 1]; 3272 struct yaffs_obj_hdr *oh = NULL; 3273 loff_t file_size = 0; 3274 3275 yaffs_strcpy(old_name, _Y("silly old name")); 3276 3277 if (in->fake && in != dev->root_dir && !force && !xmod) 3278 return ret_val; 3279 3280 yaffs_check_gc(dev, 0); 3281 yaffs_check_obj_details_loaded(in); 3282 3283 buffer = yaffs_get_temp_buffer(in->my_dev); 3284 oh = (struct yaffs_obj_hdr *)buffer; 3285 3286 prev_chunk_id = in->hdr_chunk; 3287 3288 if (prev_chunk_id > 0) { 3289 yaffs_rd_chunk_tags_nand(dev, prev_chunk_id, 3290 buffer, &old_tags); 3291 3292 yaffs_verify_oh(in, oh, &old_tags, 0); 3293 memcpy(old_name, oh->name, sizeof(oh->name)); 3294 memset(buffer, 0xff, sizeof(struct yaffs_obj_hdr)); 3295 } else { 3296 memset(buffer, 0xff, dev->data_bytes_per_chunk); 3297 } 3298 3299 oh->type = in->variant_type; 3300 oh->yst_mode = in->yst_mode; 3301 oh->shadows_obj = oh->inband_shadowed_obj_id = shadows; 3302 3303 yaffs_load_attribs_oh(oh, in); 3304 3305 if (in->parent) 3306 oh->parent_obj_id = in->parent->obj_id; 3307 else 3308 oh->parent_obj_id = 0; 3309 3310 if (name && *name) { 3311 memset(oh->name, 0, sizeof(oh->name)); 3312 yaffs_load_oh_from_name(dev, oh->name, name); 3313 } else if (prev_chunk_id > 0) { 3314 memcpy(oh->name, old_name, sizeof(oh->name)); 3315 } else { 3316 memset(oh->name, 0, sizeof(oh->name)); 3317 } 3318 3319 oh->is_shrink = is_shrink; 3320 3321 switch (in->variant_type) { 3322 case YAFFS_OBJECT_TYPE_UNKNOWN: 3323 /* Should not happen */ 3324 break; 3325 case YAFFS_OBJECT_TYPE_FILE: 3326 if (oh->parent_obj_id != YAFFS_OBJECTID_DELETED && 3327 oh->parent_obj_id != YAFFS_OBJECTID_UNLINKED) 3328 file_size = in->variant.file_variant.file_size; 3329 yaffs_oh_size_load(oh, file_size); 3330 break; 3331 case YAFFS_OBJECT_TYPE_HARDLINK: 3332 oh->equiv_id = in->variant.hardlink_variant.equiv_id; 3333 break; 3334 case YAFFS_OBJECT_TYPE_SPECIAL: 3335 /* Do nothing */ 3336 break; 3337 case YAFFS_OBJECT_TYPE_DIRECTORY: 3338 /* Do nothing */ 3339 break; 3340 case YAFFS_OBJECT_TYPE_SYMLINK: 3341 alias = in->variant.symlink_variant.alias; 3342 if (!alias) 3343 alias = _Y("no alias"); 3344 yaffs_strncpy(oh->alias, alias, YAFFS_MAX_ALIAS_LENGTH); 3345 oh->alias[YAFFS_MAX_ALIAS_LENGTH] = 0; 3346 break; 3347 } 3348 3349 /* process any xattrib modifications */ 3350 if (xmod) 3351 yaffs_apply_xattrib_mod(in, (char *)buffer, xmod); 3352 3353 /* Tags */ 3354 memset(&new_tags, 0, sizeof(new_tags)); 3355 in->serial++; 3356 new_tags.chunk_id = 0; 3357 new_tags.obj_id = in->obj_id; 3358 new_tags.serial_number = in->serial; 3359 3360 /* Add extra info for file header */ 3361 
new_tags.extra_available = 1; 3362 new_tags.extra_parent_id = oh->parent_obj_id; 3363 new_tags.extra_file_size = file_size; 3364 new_tags.extra_is_shrink = oh->is_shrink; 3365 new_tags.extra_equiv_id = oh->equiv_id; 3366 new_tags.extra_shadows = (oh->shadows_obj > 0) ? 1 : 0; 3367 new_tags.extra_obj_type = in->variant_type; 3368 yaffs_verify_oh(in, oh, &new_tags, 1); 3369 3370 /* Create new chunk in NAND */ 3371 new_chunk_id = 3372 yaffs_write_new_chunk(dev, buffer, &new_tags, 3373 (prev_chunk_id > 0) ? 1 : 0); 3374 3375 if (buffer) 3376 yaffs_release_temp_buffer(dev, buffer); 3377 3378 if (new_chunk_id < 0) 3379 return new_chunk_id; 3380 3381 in->hdr_chunk = new_chunk_id; 3382 3383 if (prev_chunk_id > 0) 3384 yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__); 3385 3386 if (!yaffs_obj_cache_dirty(in)) 3387 in->dirty = 0; 3388 3389 /* If this was a shrink, then mark the block 3390 * that the chunk lives on */ 3391 if (is_shrink) { 3392 bi = yaffs_get_block_info(in->my_dev, 3393 new_chunk_id / 3394 in->my_dev->param.chunks_per_block); 3395 bi->has_shrink_hdr = 1; 3396 } 3397 3398 3399 return new_chunk_id; 3400 } 3401 3402 /*--------------------- File read/write ------------------------ 3403 * Read and write have very similar structures. 3404 * In general the read/write has three parts to it 3405 * An incomplete chunk to start with (if the read/write is not chunk-aligned) 3406 * Some complete chunks 3407 * An incomplete chunk to end off with 3408 * 3409 * Curve-balls: the first chunk might also be the last chunk. 3410 */ 3411 3412 int yaffs_file_rd(struct yaffs_obj *in, u8 * buffer, loff_t offset, int n_bytes) 3413 { 3414 int chunk; 3415 u32 start; 3416 int n_copy; 3417 int n = n_bytes; 3418 int n_done = 0; 3419 struct yaffs_cache *cache; 3420 struct yaffs_dev *dev; 3421 3422 dev = in->my_dev; 3423 3424 while (n > 0) { 3425 yaffs_addr_to_chunk(dev, offset, &chunk, &start); 3426 chunk++; 3427 3428 /* OK now check for the curveball where the start and end are in 3429 * the same chunk. 3430 */ 3431 if ((start + n) < dev->data_bytes_per_chunk) 3432 n_copy = n; 3433 else 3434 n_copy = dev->data_bytes_per_chunk - start; 3435 3436 cache = yaffs_find_chunk_cache(in, chunk); 3437 3438 /* If the chunk is already in the cache or it is less than 3439 * a whole chunk or we're using inband tags then use the cache 3440 * (if there is caching) else bypass the cache. 3441 */ 3442 if (cache || n_copy != dev->data_bytes_per_chunk || 3443 dev->param.inband_tags) { 3444 if (dev->param.n_caches > 0) { 3445 3446 /* If we can't find the data in the cache, 3447 * then load it up. */ 3448 3449 if (!cache) { 3450 cache = 3451 yaffs_grab_chunk_cache(in->my_dev); 3452 cache->object = in; 3453 cache->chunk_id = chunk; 3454 cache->dirty = 0; 3455 cache->locked = 0; 3456 yaffs_rd_data_obj(in, chunk, 3457 cache->data); 3458 cache->n_bytes = 0; 3459 } 3460 3461 yaffs_use_cache(dev, cache, 0); 3462 3463 cache->locked = 1; 3464 3465 memcpy(buffer, &cache->data[start], n_copy); 3466 3467 cache->locked = 0; 3468 } else { 3469 /* Read into the local buffer then copy.. */ 3470 3471 u8 *local_buffer = 3472 yaffs_get_temp_buffer(dev); 3473 yaffs_rd_data_obj(in, chunk, local_buffer); 3474 3475 memcpy(buffer, &local_buffer[start], n_copy); 3476 3477 yaffs_release_temp_buffer(dev, local_buffer); 3478 } 3479 } else { 3480 /* A full chunk. Read directly into the buffer. 
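* (Worked example, assuming 2048-byte data chunks, no inband tags and no
* cache hit: a 5000-byte read starting at offset 1000 is served as three
* pieces of 1048, 2048 and 1904 bytes; only the middle, chunk-aligned piece
* takes this direct path, while the partial first and last pieces go through
* the cache or temp-buffer path above.)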
*/ 3481 yaffs_rd_data_obj(in, chunk, buffer); 3482 } 3483 n -= n_copy; 3484 offset += n_copy; 3485 buffer += n_copy; 3486 n_done += n_copy; 3487 } 3488 return n_done; 3489 } 3490 3491 int yaffs_do_file_wr(struct yaffs_obj *in, const u8 *buffer, loff_t offset, 3492 int n_bytes, int write_through) 3493 { 3494 3495 int chunk; 3496 u32 start; 3497 int n_copy; 3498 int n = n_bytes; 3499 int n_done = 0; 3500 int n_writeback; 3501 loff_t start_write = offset; 3502 int chunk_written = 0; 3503 u32 n_bytes_read; 3504 loff_t chunk_start; 3505 struct yaffs_dev *dev; 3506 3507 dev = in->my_dev; 3508 3509 while (n > 0 && chunk_written >= 0) { 3510 yaffs_addr_to_chunk(dev, offset, &chunk, &start); 3511 3512 if (((loff_t)chunk) * 3513 dev->data_bytes_per_chunk + start != offset || 3514 start >= dev->data_bytes_per_chunk) { 3515 yaffs_trace(YAFFS_TRACE_ERROR, 3516 "AddrToChunk of offset %lld gives chunk %d start %d", 3517 offset, chunk, start); 3518 } 3519 chunk++; /* File pos to chunk in file offset */ 3520 3521 /* OK now check for the curveball where the start and end are in 3522 * the same chunk. 3523 */ 3524 3525 if ((start + n) < dev->data_bytes_per_chunk) { 3526 n_copy = n; 3527 3528 /* Now calculate how many bytes to write back.... 3529 * If we're overwriting and not writing to then end of 3530 * file then we need to write back as much as was there 3531 * before. 3532 */ 3533 3534 chunk_start = (((loff_t)(chunk - 1)) * 3535 dev->data_bytes_per_chunk); 3536 3537 if (chunk_start > in->variant.file_variant.file_size) 3538 n_bytes_read = 0; /* Past end of file */ 3539 else 3540 n_bytes_read = 3541 in->variant.file_variant.file_size - 3542 chunk_start; 3543 3544 if (n_bytes_read > dev->data_bytes_per_chunk) 3545 n_bytes_read = dev->data_bytes_per_chunk; 3546 3547 n_writeback = 3548 (n_bytes_read > 3549 (start + n)) ? n_bytes_read : (start + n); 3550 3551 if (n_writeback < 0 || 3552 n_writeback > dev->data_bytes_per_chunk) 3553 BUG(); 3554 3555 } else { 3556 n_copy = dev->data_bytes_per_chunk - start; 3557 n_writeback = dev->data_bytes_per_chunk; 3558 } 3559 3560 if (n_copy != dev->data_bytes_per_chunk || 3561 dev->param.inband_tags) { 3562 /* An incomplete start or end chunk (or maybe both 3563 * start and end chunk), or we're using inband tags, 3564 * so we want to use the cache buffers. 3565 */ 3566 if (dev->param.n_caches > 0) { 3567 struct yaffs_cache *cache; 3568 3569 /* If we can't find the data in the cache, then 3570 * load the cache */ 3571 cache = yaffs_find_chunk_cache(in, chunk); 3572 3573 if (!cache && 3574 yaffs_check_alloc_available(dev, 1)) { 3575 cache = yaffs_grab_chunk_cache(dev); 3576 cache->object = in; 3577 cache->chunk_id = chunk; 3578 cache->dirty = 0; 3579 cache->locked = 0; 3580 yaffs_rd_data_obj(in, chunk, 3581 cache->data); 3582 } else if (cache && 3583 !cache->dirty && 3584 !yaffs_check_alloc_available(dev, 3585 1)) { 3586 /* Drop the cache if it was a read cache 3587 * item and no space check has been made 3588 * for it. 
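* (Illustrative note: dropping the entry leaves cache == NULL below, so the
* write falls through to the "fail write" branch (chunk_written = -1) rather
* than dirtying a cache page that could not safely be written back.)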
3589 */ 3590 cache = NULL; 3591 } 3592 3593 if (cache) { 3594 yaffs_use_cache(dev, cache, 1); 3595 cache->locked = 1; 3596 3597 memcpy(&cache->data[start], buffer, 3598 n_copy); 3599 3600 cache->locked = 0; 3601 cache->n_bytes = n_writeback; 3602 3603 if (write_through) { 3604 chunk_written = 3605 yaffs_wr_data_obj 3606 (cache->object, 3607 cache->chunk_id, 3608 cache->data, 3609 cache->n_bytes, 1); 3610 cache->dirty = 0; 3611 } 3612 } else { 3613 chunk_written = -1; /* fail write */ 3614 } 3615 } else { 3616 /* An incomplete start or end chunk (or maybe 3617 * both start and end chunk). Read into the 3618 * local buffer then copy over and write back. 3619 */ 3620 3621 u8 *local_buffer = yaffs_get_temp_buffer(dev); 3622 3623 yaffs_rd_data_obj(in, chunk, local_buffer); 3624 memcpy(&local_buffer[start], buffer, n_copy); 3625 3626 chunk_written = 3627 yaffs_wr_data_obj(in, chunk, 3628 local_buffer, 3629 n_writeback, 0); 3630 3631 yaffs_release_temp_buffer(dev, local_buffer); 3632 } 3633 } else { 3634 /* A full chunk. Write directly from the buffer. */ 3635 3636 chunk_written = 3637 yaffs_wr_data_obj(in, chunk, buffer, 3638 dev->data_bytes_per_chunk, 0); 3639 3640 /* Since we've overwritten the cached data, 3641 * we better invalidate it. */ 3642 yaffs_invalidate_chunk_cache(in, chunk); 3643 } 3644 3645 if (chunk_written >= 0) { 3646 n -= n_copy; 3647 offset += n_copy; 3648 buffer += n_copy; 3649 n_done += n_copy; 3650 } 3651 } 3652 3653 /* Update file object */ 3654 3655 if ((start_write + n_done) > in->variant.file_variant.file_size) 3656 in->variant.file_variant.file_size = (start_write + n_done); 3657 3658 in->dirty = 1; 3659 return n_done; 3660 } 3661 3662 int yaffs_wr_file(struct yaffs_obj *in, const u8 *buffer, loff_t offset, 3663 int n_bytes, int write_through) 3664 { 3665 yaffs2_handle_hole(in, offset); 3666 return yaffs_do_file_wr(in, buffer, offset, n_bytes, write_through); 3667 } 3668 3669 /* ---------------------- File resizing stuff ------------------ */ 3670 3671 static void yaffs_prune_chunks(struct yaffs_obj *in, loff_t new_size) 3672 { 3673 3674 struct yaffs_dev *dev = in->my_dev; 3675 loff_t old_size = in->variant.file_variant.file_size; 3676 int i; 3677 int chunk_id; 3678 u32 dummy; 3679 int last_del; 3680 int start_del; 3681 3682 if (old_size > 0) 3683 yaffs_addr_to_chunk(dev, old_size - 1, &last_del, &dummy); 3684 else 3685 last_del = 0; 3686 3687 yaffs_addr_to_chunk(dev, new_size + dev->data_bytes_per_chunk - 1, 3688 &start_del, &dummy); 3689 last_del++; 3690 start_del++; 3691 3692 /* Delete backwards so that we don't end up with holes if 3693 * power is lost part-way through the operation. 3694 */ 3695 for (i = last_del; i >= start_del; i--) { 3696 /* NB this could be optimised somewhat, 3697 * eg. 
could retrieve the tags and write them without 3698 * using yaffs_chunk_del 3699 */ 3700 3701 chunk_id = yaffs_find_del_file_chunk(in, i, NULL); 3702 3703 if (chunk_id < 1) 3704 continue; 3705 3706 if (chunk_id < 3707 (dev->internal_start_block * dev->param.chunks_per_block) || 3708 chunk_id >= 3709 ((dev->internal_end_block + 1) * 3710 dev->param.chunks_per_block)) { 3711 yaffs_trace(YAFFS_TRACE_ALWAYS, 3712 "Found daft chunk_id %d for %d", 3713 chunk_id, i); 3714 } else { 3715 in->n_data_chunks--; 3716 yaffs_chunk_del(dev, chunk_id, 1, __LINE__); 3717 } 3718 } 3719 } 3720 3721 void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size) 3722 { 3723 int new_full; 3724 u32 new_partial; 3725 struct yaffs_dev *dev = obj->my_dev; 3726 3727 yaffs_addr_to_chunk(dev, new_size, &new_full, &new_partial); 3728 3729 yaffs_prune_chunks(obj, new_size); 3730 3731 if (new_partial != 0) { 3732 int last_chunk = 1 + new_full; 3733 u8 *local_buffer = yaffs_get_temp_buffer(dev); 3734 3735 /* Rewrite the last chunk with its new size and zero pad */ 3736 yaffs_rd_data_obj(obj, last_chunk, local_buffer); 3737 memset(local_buffer + new_partial, 0, 3738 dev->data_bytes_per_chunk - new_partial); 3739 3740 yaffs_wr_data_obj(obj, last_chunk, local_buffer, 3741 new_partial, 1); 3742 3743 yaffs_release_temp_buffer(dev, local_buffer); 3744 } 3745 3746 obj->variant.file_variant.file_size = new_size; 3747 3748 yaffs_prune_tree(dev, &obj->variant.file_variant); 3749 } 3750 3751 int yaffs_resize_file(struct yaffs_obj *in, loff_t new_size) 3752 { 3753 struct yaffs_dev *dev = in->my_dev; 3754 loff_t old_size = in->variant.file_variant.file_size; 3755 3756 yaffs_flush_file_cache(in); 3757 yaffs_invalidate_whole_cache(in); 3758 3759 yaffs_check_gc(dev, 0); 3760 3761 if (in->variant_type != YAFFS_OBJECT_TYPE_FILE) 3762 return YAFFS_FAIL; 3763 3764 if (new_size == old_size) 3765 return YAFFS_OK; 3766 3767 if (new_size > old_size) { 3768 yaffs2_handle_hole(in, new_size); 3769 in->variant.file_variant.file_size = new_size; 3770 } else { 3771 /* new_size < old_size */ 3772 yaffs_resize_file_down(in, new_size); 3773 } 3774 3775 /* Write a new object header to reflect the resize. 3776 * show we've shrunk the file, if need be 3777 * Do this only if the file is not in the deleted directories 3778 * and is not shadowed. 3779 */ 3780 if (in->parent && 3781 !in->is_shadowed && 3782 in->parent->obj_id != YAFFS_OBJECTID_UNLINKED && 3783 in->parent->obj_id != YAFFS_OBJECTID_DELETED) 3784 yaffs_update_oh(in, NULL, 0, 0, 0, NULL); 3785 3786 return YAFFS_OK; 3787 } 3788 3789 int yaffs_flush_file(struct yaffs_obj *in, int update_time, int data_sync) 3790 { 3791 if (!in->dirty) 3792 return YAFFS_OK; 3793 3794 yaffs_flush_file_cache(in); 3795 3796 if (data_sync) 3797 return YAFFS_OK; 3798 3799 if (update_time) 3800 yaffs_load_current_time(in, 0, 0); 3801 3802 return (yaffs_update_oh(in, NULL, 0, 0, 0, NULL) >= 0) ? 3803 YAFFS_OK : YAFFS_FAIL; 3804 } 3805 3806 3807 /* yaffs_del_file deletes the whole file data 3808 * and the inode associated with the file. 3809 * It does not delete the links associated with the file. 
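* (Illustrative note: when soft deletion is in use (yaffs1 with soft delete
* enabled) and the file still has data chunks, nothing is rewritten; the
* file is moved to the deleted/unlinked directory and its chunks are
* reclaimed later by the garbage collector. For yaffs2, or when soft
* deletion is disabled, the file is resized to zero first so its chunks are
* deleted immediately.)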
3810 */ 3811 static int yaffs_unlink_file_if_needed(struct yaffs_obj *in) 3812 { 3813 int ret_val; 3814 int del_now = 0; 3815 struct yaffs_dev *dev = in->my_dev; 3816 3817 if (!in->my_inode) 3818 del_now = 1; 3819 3820 if (del_now) { 3821 ret_val = 3822 yaffs_change_obj_name(in, in->my_dev->del_dir, 3823 _Y("deleted"), 0, 0); 3824 yaffs_trace(YAFFS_TRACE_TRACING, 3825 "yaffs: immediate deletion of file %d", 3826 in->obj_id); 3827 in->deleted = 1; 3828 in->my_dev->n_deleted_files++; 3829 if (dev->param.disable_soft_del || dev->param.is_yaffs2) 3830 yaffs_resize_file(in, 0); 3831 yaffs_soft_del_file(in); 3832 } else { 3833 ret_val = 3834 yaffs_change_obj_name(in, in->my_dev->unlinked_dir, 3835 _Y("unlinked"), 0, 0); 3836 } 3837 return ret_val; 3838 } 3839 3840 int yaffs_del_file(struct yaffs_obj *in) 3841 { 3842 int ret_val = YAFFS_OK; 3843 int deleted; /* Need to cache value on stack if in is freed */ 3844 struct yaffs_dev *dev = in->my_dev; 3845 3846 if (dev->param.disable_soft_del || dev->param.is_yaffs2) 3847 yaffs_resize_file(in, 0); 3848 3849 if (in->n_data_chunks > 0) { 3850 /* Use soft deletion if there is data in the file. 3851 * That won't be the case if it has been resized to zero. 3852 */ 3853 if (!in->unlinked) 3854 ret_val = yaffs_unlink_file_if_needed(in); 3855 3856 deleted = in->deleted; 3857 3858 if (ret_val == YAFFS_OK && in->unlinked && !in->deleted) { 3859 in->deleted = 1; 3860 deleted = 1; 3861 in->my_dev->n_deleted_files++; 3862 yaffs_soft_del_file(in); 3863 } 3864 return deleted ? YAFFS_OK : YAFFS_FAIL; 3865 } else { 3866 /* The file has no data chunks so we toss it immediately */ 3867 yaffs_free_tnode(in->my_dev, in->variant.file_variant.top); 3868 in->variant.file_variant.top = NULL; 3869 yaffs_generic_obj_del(in); 3870 3871 return YAFFS_OK; 3872 } 3873 } 3874 3875 int yaffs_is_non_empty_dir(struct yaffs_obj *obj) 3876 { 3877 return (obj && 3878 obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY) && 3879 !(list_empty(&obj->variant.dir_variant.children)); 3880 } 3881 3882 static int yaffs_del_dir(struct yaffs_obj *obj) 3883 { 3884 /* First check that the directory is empty. 
*/ 3885 if (yaffs_is_non_empty_dir(obj)) 3886 return YAFFS_FAIL; 3887 3888 return yaffs_generic_obj_del(obj); 3889 } 3890 3891 static int yaffs_del_symlink(struct yaffs_obj *in) 3892 { 3893 kfree(in->variant.symlink_variant.alias); 3894 in->variant.symlink_variant.alias = NULL; 3895 3896 return yaffs_generic_obj_del(in); 3897 } 3898 3899 static int yaffs_del_link(struct yaffs_obj *in) 3900 { 3901 /* remove this hardlink from the list associated with the equivalent 3902 * object 3903 */ 3904 list_del_init(&in->hard_links); 3905 return yaffs_generic_obj_del(in); 3906 } 3907 3908 int yaffs_del_obj(struct yaffs_obj *obj) 3909 { 3910 int ret_val = -1; 3911 3912 switch (obj->variant_type) { 3913 case YAFFS_OBJECT_TYPE_FILE: 3914 ret_val = yaffs_del_file(obj); 3915 break; 3916 case YAFFS_OBJECT_TYPE_DIRECTORY: 3917 if (!list_empty(&obj->variant.dir_variant.dirty)) { 3918 yaffs_trace(YAFFS_TRACE_BACKGROUND, 3919 "Remove object %d from dirty directories", 3920 obj->obj_id); 3921 list_del_init(&obj->variant.dir_variant.dirty); 3922 } 3923 return yaffs_del_dir(obj); 3924 break; 3925 case YAFFS_OBJECT_TYPE_SYMLINK: 3926 ret_val = yaffs_del_symlink(obj); 3927 break; 3928 case YAFFS_OBJECT_TYPE_HARDLINK: 3929 ret_val = yaffs_del_link(obj); 3930 break; 3931 case YAFFS_OBJECT_TYPE_SPECIAL: 3932 ret_val = yaffs_generic_obj_del(obj); 3933 break; 3934 case YAFFS_OBJECT_TYPE_UNKNOWN: 3935 ret_val = 0; 3936 break; /* should not happen. */ 3937 } 3938 return ret_val; 3939 } 3940 3941 static int yaffs_unlink_worker(struct yaffs_obj *obj) 3942 { 3943 int del_now = 0; 3944 3945 if (!obj) 3946 return YAFFS_FAIL; 3947 3948 if (!obj->my_inode) 3949 del_now = 1; 3950 3951 yaffs_update_parent(obj->parent); 3952 3953 if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) { 3954 return yaffs_del_link(obj); 3955 } else if (!list_empty(&obj->hard_links)) { 3956 /* Curve ball: We're unlinking an object that has a hardlink. 3957 * 3958 * This problem arises because we are not strictly following 3959 * The Linux link/inode model. 3960 * 3961 * We can't really delete the object. 3962 * Instead, we do the following: 3963 * - Select a hardlink. 3964 * - Unhook it from the hard links 3965 * - Move it from its parent directory so that the rename works. 3966 * - Rename the object to the hardlink's name. 
3967 * - Delete the hardlink 3968 */ 3969 3970 struct yaffs_obj *hl; 3971 struct yaffs_obj *parent; 3972 int ret_val; 3973 YCHAR name[YAFFS_MAX_NAME_LENGTH + 1]; 3974 3975 hl = list_entry(obj->hard_links.next, struct yaffs_obj, 3976 hard_links); 3977 3978 yaffs_get_obj_name(hl, name, YAFFS_MAX_NAME_LENGTH + 1); 3979 parent = hl->parent; 3980 3981 list_del_init(&hl->hard_links); 3982 3983 yaffs_add_obj_to_dir(obj->my_dev->unlinked_dir, hl); 3984 3985 ret_val = yaffs_change_obj_name(obj, parent, name, 0, 0); 3986 3987 if (ret_val == YAFFS_OK) 3988 ret_val = yaffs_generic_obj_del(hl); 3989 3990 return ret_val; 3991 3992 } else if (del_now) { 3993 switch (obj->variant_type) { 3994 case YAFFS_OBJECT_TYPE_FILE: 3995 return yaffs_del_file(obj); 3996 break; 3997 case YAFFS_OBJECT_TYPE_DIRECTORY: 3998 list_del_init(&obj->variant.dir_variant.dirty); 3999 return yaffs_del_dir(obj); 4000 break; 4001 case YAFFS_OBJECT_TYPE_SYMLINK: 4002 return yaffs_del_symlink(obj); 4003 break; 4004 case YAFFS_OBJECT_TYPE_SPECIAL: 4005 return yaffs_generic_obj_del(obj); 4006 break; 4007 case YAFFS_OBJECT_TYPE_HARDLINK: 4008 case YAFFS_OBJECT_TYPE_UNKNOWN: 4009 default: 4010 return YAFFS_FAIL; 4011 } 4012 } else if (yaffs_is_non_empty_dir(obj)) { 4013 return YAFFS_FAIL; 4014 } else { 4015 return yaffs_change_obj_name(obj, obj->my_dev->unlinked_dir, 4016 _Y("unlinked"), 0, 0); 4017 } 4018 } 4019 4020 static int yaffs_unlink_obj(struct yaffs_obj *obj) 4021 { 4022 if (obj && obj->unlink_allowed) 4023 return yaffs_unlink_worker(obj); 4024 4025 return YAFFS_FAIL; 4026 } 4027 4028 int yaffs_unlinker(struct yaffs_obj *dir, const YCHAR *name) 4029 { 4030 struct yaffs_obj *obj; 4031 4032 obj = yaffs_find_by_name(dir, name); 4033 return yaffs_unlink_obj(obj); 4034 } 4035 4036 /* Note: 4037 * If old_name is NULL then we take old_dir as the object to be renamed. 4038 */ 4039 int yaffs_rename_obj(struct yaffs_obj *old_dir, const YCHAR *old_name, 4040 struct yaffs_obj *new_dir, const YCHAR *new_name) 4041 { 4042 struct yaffs_obj *obj = NULL; 4043 struct yaffs_obj *existing_target = NULL; 4044 int force = 0; 4045 int result; 4046 struct yaffs_dev *dev; 4047 4048 if (!old_dir || old_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) { 4049 BUG(); 4050 return YAFFS_FAIL; 4051 } 4052 if (!new_dir || new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) { 4053 BUG(); 4054 return YAFFS_FAIL; 4055 } 4056 4057 dev = old_dir->my_dev; 4058 4059 #ifdef CONFIG_YAFFS_CASE_INSENSITIVE 4060 /* Special case for case insemsitive systems. 4061 * While look-up is case insensitive, the name isn't. 4062 * Therefore we might want to change x.txt to X.txt 4063 */ 4064 if (old_dir == new_dir && 4065 old_name && new_name && 4066 yaffs_strcmp(old_name, new_name) == 0) 4067 force = 1; 4068 #endif 4069 4070 if (yaffs_strnlen(new_name, YAFFS_MAX_NAME_LENGTH + 1) > 4071 YAFFS_MAX_NAME_LENGTH) 4072 /* ENAMETOOLONG */ 4073 return YAFFS_FAIL; 4074 4075 if (old_name) 4076 obj = yaffs_find_by_name(old_dir, old_name); 4077 else{ 4078 obj = old_dir; 4079 old_dir = obj->parent; 4080 } 4081 4082 if (obj && obj->rename_allowed) { 4083 /* Now handle an existing target, if there is one */ 4084 existing_target = yaffs_find_by_name(new_dir, new_name); 4085 if (yaffs_is_non_empty_dir(existing_target)) { 4086 return YAFFS_FAIL; /* ENOTEMPTY */ 4087 } else if (existing_target && existing_target != obj) { 4088 /* Nuke the target first, using shadowing, 4089 * but only if it isn't the same object. 
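* (Illustrative note: "shadowing" means the renamed object's new header
* records existing_target->obj_id as a shadowed id, so that if power fails
* mid-rename a later scan (see yaffs_handle_shadowed_obj() below) knows the
* old target has been superseded and can clean it up.)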
4090 * 4091 * Note we must disable gc here otherwise it can mess 4092 * up the shadowing. 4093 * 4094 */ 4095 dev->gc_disable = 1; 4096 yaffs_change_obj_name(obj, new_dir, new_name, force, 4097 existing_target->obj_id); 4098 existing_target->is_shadowed = 1; 4099 yaffs_unlink_obj(existing_target); 4100 dev->gc_disable = 0; 4101 } 4102 4103 result = yaffs_change_obj_name(obj, new_dir, new_name, 1, 0); 4104 4105 yaffs_update_parent(old_dir); 4106 if (new_dir != old_dir) 4107 yaffs_update_parent(new_dir); 4108 4109 return result; 4110 } 4111 return YAFFS_FAIL; 4112 } 4113 4114 /*----------------------- Initialisation Scanning ---------------------- */ 4115 4116 void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, int obj_id, 4117 int backward_scanning) 4118 { 4119 struct yaffs_obj *obj; 4120 4121 if (backward_scanning) { 4122 /* Handle YAFFS2 case (backward scanning) 4123 * If the shadowed object exists then ignore. 4124 */ 4125 obj = yaffs_find_by_number(dev, obj_id); 4126 if (obj) 4127 return; 4128 } 4129 4130 /* Let's create it (if it does not exist) assuming it is a file so that 4131 * it can do shrinking etc. 4132 * We put it in unlinked dir to be cleaned up after the scanning 4133 */ 4134 obj = 4135 yaffs_find_or_create_by_number(dev, obj_id, YAFFS_OBJECT_TYPE_FILE); 4136 if (!obj) 4137 return; 4138 obj->is_shadowed = 1; 4139 yaffs_add_obj_to_dir(dev->unlinked_dir, obj); 4140 obj->variant.file_variant.shrink_size = 0; 4141 obj->valid = 1; /* So that we don't read any other info. */ 4142 } 4143 4144 void yaffs_link_fixup(struct yaffs_dev *dev, struct list_head *hard_list) 4145 { 4146 struct list_head *lh; 4147 struct list_head *save; 4148 struct yaffs_obj *hl; 4149 struct yaffs_obj *in; 4150 4151 list_for_each_safe(lh, save, hard_list) { 4152 hl = list_entry(lh, struct yaffs_obj, hard_links); 4153 in = yaffs_find_by_number(dev, 4154 hl->variant.hardlink_variant.equiv_id); 4155 4156 if (in) { 4157 /* Add the hardlink pointers */ 4158 hl->variant.hardlink_variant.equiv_obj = in; 4159 list_add(&hl->hard_links, &in->hard_links); 4160 } else { 4161 /* Todo Need to report/handle this better. 4162 * Got a problem... hardlink to a non-existant object 4163 */ 4164 hl->variant.hardlink_variant.equiv_obj = NULL; 4165 INIT_LIST_HEAD(&hl->hard_links); 4166 } 4167 } 4168 } 4169 4170 static void yaffs_strip_deleted_objs(struct yaffs_dev *dev) 4171 { 4172 /* 4173 * Sort out state of unlinked and deleted objects after scanning. 4174 */ 4175 struct list_head *i; 4176 struct list_head *n; 4177 struct yaffs_obj *l; 4178 4179 if (dev->read_only) 4180 return; 4181 4182 /* Soft delete all the unlinked files */ 4183 list_for_each_safe(i, n, 4184 &dev->unlinked_dir->variant.dir_variant.children) { 4185 l = list_entry(i, struct yaffs_obj, siblings); 4186 yaffs_del_obj(l); 4187 } 4188 4189 list_for_each_safe(i, n, &dev->del_dir->variant.dir_variant.children) { 4190 l = list_entry(i, struct yaffs_obj, siblings); 4191 yaffs_del_obj(l); 4192 } 4193 } 4194 4195 /* 4196 * This code iterates through all the objects making sure that they are rooted. 4197 * Any unrooted objects are re-rooted in lost+found. 4198 * An object needs to be in one of: 4199 * - Directly under deleted, unlinked 4200 * - Directly or indirectly under root. 
4201 * 4202 * Note: 4203 * This code assumes that we don't ever change the current relationships 4204 * between directories: 4205 * root_dir->parent == unlinked_dir->parent == del_dir->parent == NULL 4206 * lost-n-found->parent == root_dir 4207 * 4208 * This fixes the problem where directories might have inadvertently been 4209 * deleted leaving the object "hanging" without being rooted in the 4210 * directory tree. 4211 */ 4212 4213 static int yaffs_has_null_parent(struct yaffs_dev *dev, struct yaffs_obj *obj) 4214 { 4215 return (obj == dev->del_dir || 4216 obj == dev->unlinked_dir || obj == dev->root_dir); 4217 } 4218 4219 static void yaffs_fix_hanging_objs(struct yaffs_dev *dev) 4220 { 4221 struct yaffs_obj *obj; 4222 struct yaffs_obj *parent; 4223 int i; 4224 struct list_head *lh; 4225 struct list_head *n; 4226 int depth_limit; 4227 int hanging; 4228 4229 if (dev->read_only) 4230 return; 4231 4232 /* Iterate through the objects in each hash entry, 4233 * looking at each object. 4234 * Make sure it is rooted. 4235 */ 4236 4237 for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) { 4238 list_for_each_safe(lh, n, &dev->obj_bucket[i].list) { 4239 obj = list_entry(lh, struct yaffs_obj, hash_link); 4240 parent = obj->parent; 4241 4242 if (yaffs_has_null_parent(dev, obj)) { 4243 /* These directories are not hanging */ 4244 hanging = 0; 4245 } else if (!parent || 4246 parent->variant_type != 4247 YAFFS_OBJECT_TYPE_DIRECTORY) { 4248 hanging = 1; 4249 } else if (yaffs_has_null_parent(dev, parent)) { 4250 hanging = 0; 4251 } else { 4252 /* 4253 * Need to follow the parent chain to 4254 * see if it is hanging. 4255 */ 4256 hanging = 0; 4257 depth_limit = 100; 4258 4259 while (parent != dev->root_dir && 4260 parent->parent && 4261 parent->parent->variant_type == 4262 YAFFS_OBJECT_TYPE_DIRECTORY && 4263 depth_limit > 0) { 4264 parent = parent->parent; 4265 depth_limit--; 4266 } 4267 if (parent != dev->root_dir) 4268 hanging = 1; 4269 } 4270 if (hanging) { 4271 yaffs_trace(YAFFS_TRACE_SCAN, 4272 "Hanging object %d moved to lost and found", 4273 obj->obj_id); 4274 yaffs_add_obj_to_dir(dev->lost_n_found, obj); 4275 } 4276 } 4277 } 4278 } 4279 4280 /* 4281 * Delete directory contents for cleaning up lost and found. 
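* (Illustrative note: the function recurses into sub-directories before
* unlinking each entry, so a nested tree under lost+found is removed
* bottom-up.)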
4282 */ 4283 static void yaffs_del_dir_contents(struct yaffs_obj *dir) 4284 { 4285 struct yaffs_obj *obj; 4286 struct list_head *lh; 4287 struct list_head *n; 4288 4289 if (dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) 4290 BUG(); 4291 4292 list_for_each_safe(lh, n, &dir->variant.dir_variant.children) { 4293 obj = list_entry(lh, struct yaffs_obj, siblings); 4294 if (obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY) 4295 yaffs_del_dir_contents(obj); 4296 yaffs_trace(YAFFS_TRACE_SCAN, 4297 "Deleting lost_found object %d", 4298 obj->obj_id); 4299 yaffs_unlink_obj(obj); 4300 } 4301 } 4302 4303 static void yaffs_empty_l_n_f(struct yaffs_dev *dev) 4304 { 4305 yaffs_del_dir_contents(dev->lost_n_found); 4306 } 4307 4308 4309 struct yaffs_obj *yaffs_find_by_name(struct yaffs_obj *directory, 4310 const YCHAR *name) 4311 { 4312 int sum; 4313 struct list_head *i; 4314 YCHAR buffer[YAFFS_MAX_NAME_LENGTH + 1]; 4315 struct yaffs_obj *l; 4316 4317 if (!name) 4318 return NULL; 4319 4320 if (!directory) { 4321 yaffs_trace(YAFFS_TRACE_ALWAYS, 4322 "tragedy: yaffs_find_by_name: null pointer directory" 4323 ); 4324 BUG(); 4325 return NULL; 4326 } 4327 if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) { 4328 yaffs_trace(YAFFS_TRACE_ALWAYS, 4329 "tragedy: yaffs_find_by_name: non-directory" 4330 ); 4331 BUG(); 4332 } 4333 4334 sum = yaffs_calc_name_sum(name); 4335 4336 list_for_each(i, &directory->variant.dir_variant.children) { 4337 l = list_entry(i, struct yaffs_obj, siblings); 4338 4339 if (l->parent != directory) 4340 BUG(); 4341 4342 yaffs_check_obj_details_loaded(l); 4343 4344 /* Special case for lost-n-found */ 4345 if (l->obj_id == YAFFS_OBJECTID_LOSTNFOUND) { 4346 if (!yaffs_strcmp(name, YAFFS_LOSTNFOUND_NAME)) 4347 return l; 4348 } else if (l->sum == sum || l->hdr_chunk <= 0) { 4349 /* LostnFound chunk called Objxxx 4350 * Do a real check 4351 */ 4352 yaffs_get_obj_name(l, buffer, 4353 YAFFS_MAX_NAME_LENGTH + 1); 4354 if (!yaffs_strncmp(name, buffer, YAFFS_MAX_NAME_LENGTH)) 4355 return l; 4356 } 4357 } 4358 return NULL; 4359 } 4360 4361 /* GetEquivalentObject dereferences any hard links to get to the 4362 * actual object. 4363 */ 4364 4365 struct yaffs_obj *yaffs_get_equivalent_obj(struct yaffs_obj *obj) 4366 { 4367 if (obj && obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) { 4368 obj = obj->variant.hardlink_variant.equiv_obj; 4369 yaffs_check_obj_details_loaded(obj); 4370 } 4371 return obj; 4372 } 4373 4374 /* 4375 * A note or two on object names. 4376 * * If the object name is missing, we then make one up in the form objnnn 4377 * 4378 * * ASCII names are stored in the object header's name field from byte zero 4379 * * Unicode names are historically stored starting from byte zero. 4380 * 4381 * Then there are automatic Unicode names... 4382 * The purpose of these is to save names in a way that can be read as 4383 * ASCII or Unicode names as appropriate, thus allowing a Unicode and ASCII 4384 * system to share files. 4385 * 4386 * These automatic unicode are stored slightly differently... 4387 * - If the name can fit in the ASCII character space then they are saved as 4388 * ascii names as per above. 4389 * - If the name needs Unicode then the name is saved in Unicode 4390 * starting at oh->name[1]. 4391 4392 */ 4393 static void yaffs_fix_null_name(struct yaffs_obj *obj, YCHAR *name, 4394 int buffer_size) 4395 { 4396 /* Create an object name if we could not find one. 
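 * For illustration, assuming YAFFS_LOSTNFOUND_PREFIX is "obj" (see the
 * header for its actual value): an object with obj_id 4097 and no
 * recoverable name would be reported as "obj4097".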
*/ 4397 if (yaffs_strnlen(name, YAFFS_MAX_NAME_LENGTH) == 0) { 4398 YCHAR local_name[20]; 4399 YCHAR num_string[20]; 4400 YCHAR *x = &num_string[19]; 4401 unsigned v = obj->obj_id; 4402 num_string[19] = 0; 4403 while (v > 0) { 4404 x--; 4405 *x = '0' + (v % 10); 4406 v /= 10; 4407 } 4408 /* make up a name */ 4409 yaffs_strcpy(local_name, YAFFS_LOSTNFOUND_PREFIX); 4410 yaffs_strcat(local_name, x); 4411 yaffs_strncpy(name, local_name, buffer_size - 1); 4412 } 4413 } 4414 4415 int yaffs_get_obj_name(struct yaffs_obj *obj, YCHAR *name, int buffer_size) 4416 { 4417 memset(name, 0, buffer_size * sizeof(YCHAR)); 4418 yaffs_check_obj_details_loaded(obj); 4419 if (obj->obj_id == YAFFS_OBJECTID_LOSTNFOUND) { 4420 yaffs_strncpy(name, YAFFS_LOSTNFOUND_NAME, buffer_size - 1); 4421 } else if (obj->short_name[0]) { 4422 yaffs_strcpy(name, obj->short_name); 4423 } else if (obj->hdr_chunk > 0) { 4424 u8 *buffer = yaffs_get_temp_buffer(obj->my_dev); 4425 4426 struct yaffs_obj_hdr *oh = (struct yaffs_obj_hdr *)buffer; 4427 4428 memset(buffer, 0, obj->my_dev->data_bytes_per_chunk); 4429 4430 if (obj->hdr_chunk > 0) { 4431 yaffs_rd_chunk_tags_nand(obj->my_dev, 4432 obj->hdr_chunk, 4433 buffer, NULL); 4434 } 4435 yaffs_load_name_from_oh(obj->my_dev, name, oh->name, 4436 buffer_size); 4437 4438 yaffs_release_temp_buffer(obj->my_dev, buffer); 4439 } 4440 4441 yaffs_fix_null_name(obj, name, buffer_size); 4442 4443 return yaffs_strnlen(name, YAFFS_MAX_NAME_LENGTH); 4444 } 4445 4446 loff_t yaffs_get_obj_length(struct yaffs_obj *obj) 4447 { 4448 /* Dereference any hard linking */ 4449 obj = yaffs_get_equivalent_obj(obj); 4450 4451 if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE) 4452 return obj->variant.file_variant.file_size; 4453 if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) { 4454 if (!obj->variant.symlink_variant.alias) 4455 return 0; 4456 return yaffs_strnlen(obj->variant.symlink_variant.alias, 4457 YAFFS_MAX_ALIAS_LENGTH); 4458 } else { 4459 /* Only a directory should drop through to here */ 4460 return obj->my_dev->data_bytes_per_chunk; 4461 } 4462 } 4463 4464 int yaffs_get_obj_link_count(struct yaffs_obj *obj) 4465 { 4466 int count = 0; 4467 struct list_head *i; 4468 4469 if (!obj->unlinked) 4470 count++; /* the object itself */ 4471 4472 list_for_each(i, &obj->hard_links) 4473 count++; /* add the hard links; */ 4474 4475 return count; 4476 } 4477 4478 int yaffs_get_obj_inode(struct yaffs_obj *obj) 4479 { 4480 obj = yaffs_get_equivalent_obj(obj); 4481 4482 return obj->obj_id; 4483 } 4484 4485 unsigned yaffs_get_obj_type(struct yaffs_obj *obj) 4486 { 4487 obj = yaffs_get_equivalent_obj(obj); 4488 4489 switch (obj->variant_type) { 4490 case YAFFS_OBJECT_TYPE_FILE: 4491 return DT_REG; 4492 break; 4493 case YAFFS_OBJECT_TYPE_DIRECTORY: 4494 return DT_DIR; 4495 break; 4496 case YAFFS_OBJECT_TYPE_SYMLINK: 4497 return DT_LNK; 4498 break; 4499 case YAFFS_OBJECT_TYPE_HARDLINK: 4500 return DT_REG; 4501 break; 4502 case YAFFS_OBJECT_TYPE_SPECIAL: 4503 if (S_ISFIFO(obj->yst_mode)) 4504 return DT_FIFO; 4505 if (S_ISCHR(obj->yst_mode)) 4506 return DT_CHR; 4507 if (S_ISBLK(obj->yst_mode)) 4508 return DT_BLK; 4509 if (S_ISSOCK(obj->yst_mode)) 4510 return DT_SOCK; 4511 return DT_REG; 4512 break; 4513 default: 4514 return DT_REG; 4515 break; 4516 } 4517 } 4518 4519 YCHAR *yaffs_get_symlink_alias(struct yaffs_obj *obj) 4520 { 4521 obj = yaffs_get_equivalent_obj(obj); 4522 if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) 4523 return yaffs_clone_str(obj->variant.symlink_variant.alias); 4524 else 4525 return 
yaffs_clone_str(_Y("")); 4526 } 4527 4528 /*--------------------------- Initialisation code -------------------------- */ 4529 4530 static int yaffs_check_dev_fns(const struct yaffs_dev *dev) 4531 { 4532 /* Common functions, gotta have */ 4533 if (!dev->param.erase_fn || !dev->param.initialise_flash_fn) 4534 return 0; 4535 4536 /* Can use the "with tags" style interface for yaffs1 or yaffs2 */ 4537 if (dev->param.write_chunk_tags_fn && 4538 dev->param.read_chunk_tags_fn && 4539 !dev->param.write_chunk_fn && 4540 !dev->param.read_chunk_fn && 4541 dev->param.bad_block_fn && dev->param.query_block_fn) 4542 return 1; 4543 4544 /* Can use the "spare" style interface for yaffs1 */ 4545 if (!dev->param.is_yaffs2 && 4546 !dev->param.write_chunk_tags_fn && 4547 !dev->param.read_chunk_tags_fn && 4548 dev->param.write_chunk_fn && 4549 dev->param.read_chunk_fn && 4550 !dev->param.bad_block_fn && !dev->param.query_block_fn) 4551 return 1; 4552 4553 return 0; /* bad */ 4554 } 4555 4556 static int yaffs_create_initial_dir(struct yaffs_dev *dev) 4557 { 4558 /* Initialise the unlinked, deleted, root and lost+found directories */ 4559 dev->lost_n_found = dev->root_dir = NULL; 4560 dev->unlinked_dir = dev->del_dir = NULL; 4561 dev->unlinked_dir = 4562 yaffs_create_fake_dir(dev, YAFFS_OBJECTID_UNLINKED, S_IFDIR); 4563 dev->del_dir = 4564 yaffs_create_fake_dir(dev, YAFFS_OBJECTID_DELETED, S_IFDIR); 4565 dev->root_dir = 4566 yaffs_create_fake_dir(dev, YAFFS_OBJECTID_ROOT, 4567 YAFFS_ROOT_MODE | S_IFDIR); 4568 dev->lost_n_found = 4569 yaffs_create_fake_dir(dev, YAFFS_OBJECTID_LOSTNFOUND, 4570 YAFFS_LOSTNFOUND_MODE | S_IFDIR); 4571 4572 if (dev->lost_n_found && dev->root_dir && dev->unlinked_dir 4573 && dev->del_dir) { 4574 yaffs_add_obj_to_dir(dev->root_dir, dev->lost_n_found); 4575 return YAFFS_OK; 4576 } 4577 return YAFFS_FAIL; 4578 } 4579 4580 int yaffs_guts_initialise(struct yaffs_dev *dev) 4581 { 4582 int init_failed = 0; 4583 unsigned x; 4584 int bits; 4585 4586 yaffs_trace(YAFFS_TRACE_TRACING, "yaffs: yaffs_guts_initialise()"); 4587 4588 /* Check stuff that must be set */ 4589 4590 if (!dev) { 4591 yaffs_trace(YAFFS_TRACE_ALWAYS, 4592 "yaffs: Need a device" 4593 ); 4594 return YAFFS_FAIL; 4595 } 4596 4597 if (dev->is_mounted) { 4598 yaffs_trace(YAFFS_TRACE_ALWAYS, "device already mounted"); 4599 return YAFFS_FAIL; 4600 } 4601 4602 dev->internal_start_block = dev->param.start_block; 4603 dev->internal_end_block = dev->param.end_block; 4604 dev->block_offset = 0; 4605 dev->chunk_offset = 0; 4606 dev->n_free_chunks = 0; 4607 4608 dev->gc_block = 0; 4609 4610 if (dev->param.start_block == 0) { 4611 dev->internal_start_block = dev->param.start_block + 1; 4612 dev->internal_end_block = dev->param.end_block + 1; 4613 dev->block_offset = 1; 4614 dev->chunk_offset = dev->param.chunks_per_block; 4615 } 4616 4617 /* Check geometry parameters. 
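 *
 * Roughly, to pass the check below a configuration must:
 *   - use chunks of at least 1024 bytes for yaffs2 without inband tags,
 *     or at least 512 bytes for yaffs1
 *   - only enable inband tags on yaffs2
 *   - have at least 2 chunks per block and at least 2 reserved blocks
 *   - have an end block comfortably above the start block plus the reserve
 *
 * As an illustrative (not prescriptive) example, a 2048-byte-page NAND
 * with 64 chunks per block, start_block 0, end_block 1023 and
 * n_reserved_blocks 5 satisfies all of these.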
*/ 4618 4619 if ((!dev->param.inband_tags && dev->param.is_yaffs2 && 4620 dev->param.total_bytes_per_chunk < 1024) || 4621 (!dev->param.is_yaffs2 && 4622 dev->param.total_bytes_per_chunk < 512) || 4623 (dev->param.inband_tags && !dev->param.is_yaffs2) || 4624 dev->param.chunks_per_block < 2 || 4625 dev->param.n_reserved_blocks < 2 || 4626 dev->internal_start_block <= 0 || 4627 dev->internal_end_block <= 0 || 4628 dev->internal_end_block <= 4629 (dev->internal_start_block + dev->param.n_reserved_blocks + 2) 4630 ) { 4631 /* otherwise it is too small */ 4632 yaffs_trace(YAFFS_TRACE_ALWAYS, 4633 "NAND geometry problems: chunk size %d, type is yaffs%s, inband_tags %d ", 4634 dev->param.total_bytes_per_chunk, 4635 dev->param.is_yaffs2 ? "2" : "", 4636 dev->param.inband_tags); 4637 return YAFFS_FAIL; 4638 } 4639 4640 if (yaffs_init_nand(dev) != YAFFS_OK) { 4641 yaffs_trace(YAFFS_TRACE_ALWAYS, "InitialiseNAND failed"); 4642 return YAFFS_FAIL; 4643 } 4644 4645 /* Sort out space for inband tags, if required */ 4646 if (dev->param.inband_tags) 4647 dev->data_bytes_per_chunk = 4648 dev->param.total_bytes_per_chunk - 4649 sizeof(struct yaffs_packed_tags2_tags_only); 4650 else 4651 dev->data_bytes_per_chunk = dev->param.total_bytes_per_chunk; 4652 4653 /* Got the right mix of functions? */ 4654 if (!yaffs_check_dev_fns(dev)) { 4655 /* Function missing */ 4656 yaffs_trace(YAFFS_TRACE_ALWAYS, 4657 "device function(s) missing or wrong"); 4658 4659 return YAFFS_FAIL; 4660 } 4661 4662 /* Finished with most checks. Further checks happen later on too. */ 4663 4664 dev->is_mounted = 1; 4665 4666 /* OK now calculate a few things for the device */ 4667 4668 /* 4669 * Calculate all the chunk size manipulation numbers: 4670 */ 4671 x = dev->data_bytes_per_chunk; 4672 /* We always use dev->chunk_shift and dev->chunk_div */ 4673 dev->chunk_shift = calc_shifts(x); 4674 x >>= dev->chunk_shift; 4675 dev->chunk_div = x; 4676 /* We only use chunk mask if chunk_div is 1 */ 4677 dev->chunk_mask = (1 << dev->chunk_shift) - 1; 4678 4679 /* 4680 * Calculate chunk_grp_bits. 4681 * We need to find the next power of 2 > than internal_end_block 4682 */ 4683 4684 x = dev->param.chunks_per_block * (dev->internal_end_block + 1); 4685 4686 bits = calc_shifts_ceiling(x); 4687 4688 /* Set up tnode width if wide tnodes are enabled. */ 4689 if (!dev->param.wide_tnodes_disabled) { 4690 /* bits must be even so that we end up with 32-bit words */ 4691 if (bits & 1) 4692 bits++; 4693 if (bits < 16) 4694 dev->tnode_width = 16; 4695 else 4696 dev->tnode_width = bits; 4697 } else { 4698 dev->tnode_width = 16; 4699 } 4700 4701 dev->tnode_mask = (1 << dev->tnode_width) - 1; 4702 4703 /* Level0 Tnodes are 16 bits or wider (if wide tnodes are enabled), 4704 * so if the bitwidth of the 4705 * chunk range we're using is greater than 16 we need 4706 * to figure out chunk shift and chunk_grp_size 4707 */ 4708 4709 if (bits <= dev->tnode_width) 4710 dev->chunk_grp_bits = 0; 4711 else 4712 dev->chunk_grp_bits = bits - dev->tnode_width; 4713 4714 dev->tnode_size = (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8; 4715 if (dev->tnode_size < sizeof(struct yaffs_tnode)) 4716 dev->tnode_size = sizeof(struct yaffs_tnode); 4717 4718 dev->chunk_grp_size = 1 << dev->chunk_grp_bits; 4719 4720 if (dev->param.chunks_per_block < dev->chunk_grp_size) { 4721 /* We have a problem because the soft delete won't work if 4722 * the chunk group size > chunks per block. 4723 * This can be remedied by using larger "virtual blocks". 
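 *
 * A rough worked example (illustrative figures only): with 2048-byte data
 * chunks, chunk_shift is 11 and chunk_div is 1, so the cheap mask path is
 * used.  With 64 chunks per block and internal_end_block 8191 the chunk
 * range needs 19 bits; with wide tnodes enabled tnode_width is rounded up
 * to 20 and chunk_grp_size is 1, while with wide tnodes disabled
 * tnode_width stays at 16, chunk_grp_bits becomes 3 and chunk_grp_size 8,
 * which is still <= the 64 chunks per block, so this check would not trip.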
4724 */ 4725 yaffs_trace(YAFFS_TRACE_ALWAYS, "chunk group too large"); 4726 4727 return YAFFS_FAIL; 4728 } 4729 4730 /* Finished verifying the device, continue with initialisation */ 4731 4732 /* More device initialisation */ 4733 dev->all_gcs = 0; 4734 dev->passive_gc_count = 0; 4735 dev->oldest_dirty_gc_count = 0; 4736 dev->bg_gcs = 0; 4737 dev->gc_block_finder = 0; 4738 dev->buffered_block = -1; 4739 dev->doing_buffered_block_rewrite = 0; 4740 dev->n_deleted_files = 0; 4741 dev->n_bg_deletions = 0; 4742 dev->n_unlinked_files = 0; 4743 dev->n_ecc_fixed = 0; 4744 dev->n_ecc_unfixed = 0; 4745 dev->n_tags_ecc_fixed = 0; 4746 dev->n_tags_ecc_unfixed = 0; 4747 dev->n_erase_failures = 0; 4748 dev->n_erased_blocks = 0; 4749 dev->gc_disable = 0; 4750 dev->has_pending_prioritised_gc = 1; 4751 /* Assume the worst for now, will get fixed on first GC */ 4752 INIT_LIST_HEAD(&dev->dirty_dirs); 4753 dev->oldest_dirty_seq = 0; 4754 dev->oldest_dirty_block = 0; 4755 4756 /* Initialise temporary buffers and caches. */ 4757 if (!yaffs_init_tmp_buffers(dev)) 4758 init_failed = 1; 4759 4760 dev->cache = NULL; 4761 dev->gc_cleanup_list = NULL; 4762 4763 if (!init_failed && dev->param.n_caches > 0) { 4764 int i; 4765 void *buf; 4766 int cache_bytes = 4767 dev->param.n_caches * sizeof(struct yaffs_cache); 4768 4769 if (dev->param.n_caches > YAFFS_MAX_SHORT_OP_CACHES) 4770 dev->param.n_caches = YAFFS_MAX_SHORT_OP_CACHES; 4771 4772 dev->cache = kmalloc(cache_bytes, GFP_NOFS); 4773 4774 buf = (u8 *) dev->cache; 4775 4776 if (dev->cache) 4777 memset(dev->cache, 0, cache_bytes); 4778 4779 for (i = 0; i < dev->param.n_caches && buf; i++) { 4780 dev->cache[i].object = NULL; 4781 dev->cache[i].last_use = 0; 4782 dev->cache[i].dirty = 0; 4783 dev->cache[i].data = buf = 4784 kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS); 4785 } 4786 if (!buf) 4787 init_failed = 1; 4788 4789 dev->cache_last_use = 0; 4790 } 4791 4792 dev->cache_hits = 0; 4793 4794 if (!init_failed) { 4795 dev->gc_cleanup_list = 4796 kmalloc(dev->param.chunks_per_block * sizeof(u32), 4797 GFP_NOFS); 4798 if (!dev->gc_cleanup_list) 4799 init_failed = 1; 4800 } 4801 4802 if (dev->param.is_yaffs2) 4803 dev->param.use_header_file_size = 1; 4804 4805 if (!init_failed && !yaffs_init_blocks(dev)) 4806 init_failed = 1; 4807 4808 yaffs_init_tnodes_and_objs(dev); 4809 4810 if (!init_failed && !yaffs_create_initial_dir(dev)) 4811 init_failed = 1; 4812 4813 if (!init_failed && dev->param.is_yaffs2 && 4814 !dev->param.disable_summary && 4815 !yaffs_summary_init(dev)) 4816 init_failed = 1; 4817 4818 if (!init_failed) { 4819 /* Now scan the flash. */ 4820 if (dev->param.is_yaffs2) { 4821 if (yaffs2_checkpt_restore(dev)) { 4822 yaffs_check_obj_details_loaded(dev->root_dir); 4823 yaffs_trace(YAFFS_TRACE_CHECKPOINT | 4824 YAFFS_TRACE_MOUNT, 4825 "yaffs: restored from checkpoint" 4826 ); 4827 } else { 4828 4829 /* Clean up the mess caused by an aborted 4830 * checkpoint load then scan backwards. 
4831 */ 4832 yaffs_deinit_blocks(dev); 4833 4834 yaffs_deinit_tnodes_and_objs(dev); 4835 4836 dev->n_erased_blocks = 0; 4837 dev->n_free_chunks = 0; 4838 dev->alloc_block = -1; 4839 dev->alloc_page = -1; 4840 dev->n_deleted_files = 0; 4841 dev->n_unlinked_files = 0; 4842 dev->n_bg_deletions = 0; 4843 4844 if (!init_failed && !yaffs_init_blocks(dev)) 4845 init_failed = 1; 4846 4847 yaffs_init_tnodes_and_objs(dev); 4848 4849 if (!init_failed 4850 && !yaffs_create_initial_dir(dev)) 4851 init_failed = 1; 4852 4853 if (!init_failed && !yaffs2_scan_backwards(dev)) 4854 init_failed = 1; 4855 } 4856 } else if (!yaffs1_scan(dev)) { 4857 init_failed = 1; 4858 } 4859 4860 yaffs_strip_deleted_objs(dev); 4861 yaffs_fix_hanging_objs(dev); 4862 if (dev->param.empty_lost_n_found) 4863 yaffs_empty_l_n_f(dev); 4864 } 4865 4866 if (init_failed) { 4867 /* Clean up the mess */ 4868 yaffs_trace(YAFFS_TRACE_TRACING, 4869 "yaffs: yaffs_guts_initialise() aborted."); 4870 4871 yaffs_deinitialise(dev); 4872 return YAFFS_FAIL; 4873 } 4874 4875 /* Zero out stats */ 4876 dev->n_page_reads = 0; 4877 dev->n_page_writes = 0; 4878 dev->n_erasures = 0; 4879 dev->n_gc_copies = 0; 4880 dev->n_retried_writes = 0; 4881 4882 dev->n_retired_blocks = 0; 4883 4884 yaffs_verify_free_chunks(dev); 4885 yaffs_verify_blocks(dev); 4886 4887 /* Clean up any aborted checkpoint data */ 4888 if (!dev->is_checkpointed && dev->blocks_in_checkpt > 0) 4889 yaffs2_checkpt_invalidate(dev); 4890 4891 yaffs_trace(YAFFS_TRACE_TRACING, 4892 "yaffs: yaffs_guts_initialise() done."); 4893 return YAFFS_OK; 4894 } 4895 4896 void yaffs_deinitialise(struct yaffs_dev *dev) 4897 { 4898 if (dev->is_mounted) { 4899 int i; 4900 4901 yaffs_deinit_blocks(dev); 4902 yaffs_deinit_tnodes_and_objs(dev); 4903 yaffs_summary_deinit(dev); 4904 4905 if (dev->param.n_caches > 0 && dev->cache) { 4906 4907 for (i = 0; i < dev->param.n_caches; i++) { 4908 kfree(dev->cache[i].data); 4909 dev->cache[i].data = NULL; 4910 } 4911 4912 kfree(dev->cache); 4913 dev->cache = NULL; 4914 } 4915 4916 kfree(dev->gc_cleanup_list); 4917 4918 for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) 4919 kfree(dev->temp_buffer[i].buffer); 4920 4921 dev->is_mounted = 0; 4922 4923 if (dev->param.deinitialise_flash_fn) 4924 dev->param.deinitialise_flash_fn(dev); 4925 } 4926 } 4927 4928 int yaffs_count_free_chunks(struct yaffs_dev *dev) 4929 { 4930 int n_free = 0; 4931 int b; 4932 struct yaffs_block_info *blk; 4933 4934 blk = dev->block_info; 4935 for (b = dev->internal_start_block; b <= dev->internal_end_block; b++) { 4936 switch (blk->block_state) { 4937 case YAFFS_BLOCK_STATE_EMPTY: 4938 case YAFFS_BLOCK_STATE_ALLOCATING: 4939 case YAFFS_BLOCK_STATE_COLLECTING: 4940 case YAFFS_BLOCK_STATE_FULL: 4941 n_free += 4942 (dev->param.chunks_per_block - blk->pages_in_use + 4943 blk->soft_del_pages); 4944 break; 4945 default: 4946 break; 4947 } 4948 blk++; 4949 } 4950 return n_free; 4951 } 4952 4953 int yaffs_get_n_free_chunks(struct yaffs_dev *dev) 4954 { 4955 /* This is what we report to the outside world */ 4956 int n_free; 4957 int n_dirty_caches; 4958 int blocks_for_checkpt; 4959 int i; 4960 4961 n_free = dev->n_free_chunks; 4962 n_free += dev->n_deleted_files; 4963 4964 /* Now count and subtract the number of dirty chunks in the cache. 
*/ 4965 4966 for (n_dirty_caches = 0, i = 0; i < dev->param.n_caches; i++) { 4967 if (dev->cache[i].dirty) 4968 n_dirty_caches++; 4969 } 4970 4971 n_free -= n_dirty_caches; 4972 4973 n_free -= 4974 ((dev->param.n_reserved_blocks + 1) * dev->param.chunks_per_block); 4975 4976 /* Now figure checkpoint space and report that... */ 4977 blocks_for_checkpt = yaffs_calc_checkpt_blocks_required(dev); 4978 4979 n_free -= (blocks_for_checkpt * dev->param.chunks_per_block); 4980 4981 if (n_free < 0) 4982 n_free = 0; 4983 4984 return n_free; 4985 } 4986 4987 /* 4988 * Marshalling functions to get loff_t file sizes into and out of 4989 * object headers. 4990 */ 4991 void yaffs_oh_size_load(struct yaffs_obj_hdr *oh, loff_t fsize) 4992 { 4993 oh->file_size_low = (fsize & 0xFFFFFFFF); 4994 oh->file_size_high = ((fsize >> 32) & 0xFFFFFFFF); 4995 } 4996 4997 loff_t yaffs_oh_to_size(struct yaffs_obj_hdr *oh) 4998 { 4999 loff_t retval; 5000 5001 if (~(oh->file_size_high)) 5002 retval = (((loff_t) oh->file_size_high) << 32) | 5003 (((loff_t) oh->file_size_low) & 0xFFFFFFFF); 5004 else 5005 retval = (loff_t) oh->file_size_low; 5006 5007 return retval; 5008 } 5009
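
/*
 * Illustrative round trip for the size marshalling above (a sketch for
 * documentation, not part of the driver logic):
 *
 *   struct yaffs_obj_hdr oh;
 *   loff_t sz = 0x123456789A;      // needs more than 32 bits
 *
 *   yaffs_oh_size_load(&oh, sz);
 *   // oh.file_size_high == 0x12, oh.file_size_low == 0x3456789A
 *   // yaffs_oh_to_size(&oh) == sz
 *
 * If file_size_high reads back as all ones (e.g. a header written before
 * that field was used, presumably left as erased-flash 0xff padding),
 * yaffs_oh_to_size() falls back to treating file_size_low alone as the size.
 */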