1 /* 2 * Create a squashfs filesystem. This is a highly compressed read only 3 * filesystem. 4 * 5 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 6 * 2012, 2013, 2014 7 * Phillip Lougher <phillip (at) squashfs.org.uk> 8 * 9 * This program is free software; you can redistribute it and/or 10 * modify it under the terms of the GNU General Public License 11 * as published by the Free Software Foundation; either version 2, 12 * or (at your option) any later version. 13 * 14 * This program is distributed in the hope that it will be useful, 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * GNU General Public License for more details. 18 * 19 * You should have received a copy of the GNU General Public License 20 * along with this program; if not, write to the Free Software 21 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 22 * 23 * mksquashfs.c 24 */ 25 26 #define FALSE 0 27 #define TRUE 1 28 #define MAX_LINE 16384 29 30 #include <pwd.h> 31 #include <grp.h> 32 #include <time.h> 33 #include <unistd.h> 34 #include <stdio.h> 35 #include <stddef.h> 36 #include <sys/types.h> 37 #include <sys/stat.h> 38 #include <fcntl.h> 39 #include <errno.h> 40 #include <dirent.h> 41 #include <string.h> 42 #include <stdlib.h> 43 #include <signal.h> 44 #include <setjmp.h> 45 #include <sys/types.h> 46 #include <sys/mman.h> 47 #include <pthread.h> 48 #include <regex.h> 49 #include <fnmatch.h> 50 #include <sys/wait.h> 51 #include <limits.h> 52 #include <ctype.h> 53 54 #ifndef FNM_EXTMATCH /* glibc extension */ 55 #define FNM_EXTMATCH 0 56 #endif 57 58 #ifndef linux 59 #define __BYTE_ORDER BYTE_ORDER 60 #define __BIG_ENDIAN BIG_ENDIAN 61 #define __LITTLE_ENDIAN LITTLE_ENDIAN 62 #include <sys/sysctl.h> 63 #else 64 #include <endian.h> 65 #include <sys/sysinfo.h> 66 #endif 67 68 #include "squashfs_fs.h" 69 #include "squashfs_swap.h" 70 #include "mksquashfs.h" 71 #include "sort.h" 72 #include "pseudo.h" 73 #include "compressor.h" 74 #include "xattr.h" 75 #include "action.h" 76 #include "error.h" 77 #include "progressbar.h" 78 #include "info.h" 79 #include "caches-queues-lists.h" 80 #include "read_fs.h" 81 #include "restore.h" 82 #include "process_fragments.h" 83 84 /* ANDROID CHANGES START*/ 85 #ifdef ANDROID 86 #include "android.h" 87 #include "private/android_filesystem_config.h" 88 #include "private/canned_fs_config.h" 89 int android_config = FALSE; 90 char *context_file = NULL; 91 char *mount_point = NULL; 92 char *target_out_path = NULL; 93 fs_config_func_t fs_config_func = NULL; 94 int compress_thresh_per = 0; 95 int align_4k_blocks = TRUE; 96 FILE *block_map_file = NULL; 97 #endif 98 /* ANDROID CHANGES END */ 99 100 int delete = FALSE; 101 int fd; 102 struct squashfs_super_block sBlk; 103 104 /* filesystem flags for building */ 105 int comp_opts = FALSE; 106 int no_xattrs = XATTR_DEF; 107 int noX = FALSE; 108 int duplicate_checking = TRUE; 109 int noF = FALSE; 110 int no_fragments = FALSE; 111 int always_use_fragments = FALSE; 112 int noI = FALSE; 113 int noD = FALSE; 114 int silent = TRUE; 115 int exportable = TRUE; 116 int sparse_files = TRUE; 117 int old_exclude = TRUE; 118 int use_regex = FALSE; 119 int nopad = FALSE; 120 int exit_on_error = FALSE; 121 122 long long global_uid = -1, global_gid = -1; 123 124 /* superblock attributes */ 125 int block_size = SQUASHFS_FILE_SIZE, block_log; 126 unsigned int id_count = 0; 127 int file_count = 0, sym_count = 0, dev_count = 0, dir_count = 0, fifo_count = 0, 128 sock_count = 0; 129 130 /* ANDROID CHANGES START*/ 131 #ifdef ANDROID 132 int whitelisted_count = 0; 133 #endif 134 /* ANDROID CHANGES END */ 135 136 /* write position within data section */ 137 long long bytes = 0, total_bytes = 0; 138 139 /* in memory directory table - possibly compressed */ 140 char *directory_table = NULL; 141 unsigned int directory_bytes = 0, directory_size = 0, total_directory_bytes = 0; 142 143 /* cached directory table */ 144 char *directory_data_cache = NULL; 145 unsigned int directory_cache_bytes = 0, directory_cache_size = 0; 146 147 /* in memory inode table - possibly compressed */ 148 char *inode_table = NULL; 149 unsigned int inode_bytes = 0, inode_size = 0, total_inode_bytes = 0; 150 151 /* cached inode table */ 152 char *data_cache = NULL; 153 unsigned int cache_bytes = 0, cache_size = 0, inode_count = 0; 154 155 /* inode lookup table */ 156 squashfs_inode *inode_lookup_table = NULL; 157 158 /* in memory directory data */ 159 #define I_COUNT_SIZE 128 160 #define DIR_ENTRIES 32 161 #define INODE_HASH_SIZE 65536 162 #define INODE_HASH_MASK (INODE_HASH_SIZE - 1) 163 #define INODE_HASH(dev, ino) (ino & INODE_HASH_MASK) 164 165 struct cached_dir_index { 166 struct squashfs_dir_index index; 167 char *name; 168 }; 169 170 struct directory { 171 unsigned int start_block; 172 unsigned int size; 173 unsigned char *buff; 174 unsigned char *p; 175 unsigned int entry_count; 176 unsigned char *entry_count_p; 177 unsigned int i_count; 178 unsigned int i_size; 179 struct cached_dir_index *index; 180 unsigned char *index_count_p; 181 unsigned int inode_number; 182 }; 183 184 struct inode_info *inode_info[INODE_HASH_SIZE]; 185 186 /* hash tables used to do fast duplicate searches in duplicate check */ 187 struct file_info *dupl[65536]; 188 int dup_files = 0; 189 190 /* exclude file handling */ 191 /* list of exclude dirs/files */ 192 struct exclude_info { 193 dev_t st_dev; 194 ino_t st_ino; 195 }; 196 197 #define EXCLUDE_SIZE 8192 198 int exclude = 0; 199 struct exclude_info *exclude_paths = NULL; 200 int old_excluded(char *filename, struct stat *buf); 201 202 struct path_entry { 203 char *name; 204 regex_t *preg; 205 struct pathname *paths; 206 }; 207 208 struct pathname { 209 int names; 210 struct path_entry *name; 211 }; 212 213 struct pathnames { 214 int count; 215 struct pathname *path[0]; 216 }; 217 #define PATHS_ALLOC_SIZE 10 218 219 struct pathnames *paths = NULL; 220 struct pathname *path = NULL; 221 struct pathname *stickypath = NULL; 222 int excluded(char *name, struct pathnames *paths, struct pathnames **new); 223 224 int fragments = 0; 225 226 #define FRAG_SIZE 32768 227 228 struct squashfs_fragment_entry *fragment_table = NULL; 229 int fragments_outstanding = 0; 230 231 int fragments_locked = FALSE; 232 233 /* current inode number for directories and non directories */ 234 unsigned int inode_no = 1; 235 unsigned int root_inode_number = 0; 236 237 /* list of source dirs/files */ 238 int source = 0; 239 char **source_path; 240 241 /* list of root directory entries read from original filesystem */ 242 int old_root_entries = 0; 243 struct old_root_entry_info { 244 char *name; 245 struct inode_info inode; 246 }; 247 struct old_root_entry_info *old_root_entry; 248 249 /* restore orignal filesystem state if appending to existing filesystem is 250 * cancelled */ 251 int appending = FALSE; 252 char *sdata_cache, *sdirectory_data_cache, *sdirectory_compressed; 253 254 long long sbytes, stotal_bytes; 255 256 unsigned int sinode_bytes, scache_bytes, sdirectory_bytes, 257 sdirectory_cache_bytes, sdirectory_compressed_bytes, 258 stotal_inode_bytes, stotal_directory_bytes, 259 sinode_count = 0, sfile_count, ssym_count, sdev_count, 260 sdir_count, sfifo_count, ssock_count, sdup_files; 261 int sfragments; 262 int threads; 263 264 /* flag whether destination file is a block device */ 265 int block_device = FALSE; 266 267 /* flag indicating whether files are sorted using sort list(s) */ 268 int sorted = FALSE; 269 270 /* save destination file name for deleting on error */ 271 char *destination_file = NULL; 272 273 /* recovery file for abnormal exit on appending */ 274 char *recovery_file = NULL; 275 int recover = TRUE; 276 277 struct id *id_hash_table[ID_ENTRIES]; 278 struct id *id_table[SQUASHFS_IDS], *sid_table[SQUASHFS_IDS]; 279 unsigned int uid_count = 0, guid_count = 0; 280 unsigned int sid_count = 0, suid_count = 0, sguid_count = 0; 281 282 struct cache *reader_buffer, *fragment_buffer, *reserve_cache; 283 struct cache *bwriter_buffer, *fwriter_buffer; 284 struct queue *to_reader, *to_deflate, *to_writer, *from_writer, 285 *to_frag, *locked_fragment, *to_process_frag; 286 struct seq_queue *to_main; 287 pthread_t reader_thread, writer_thread, main_thread; 288 pthread_t *deflator_thread, *frag_deflator_thread, *frag_thread; 289 pthread_t *restore_thread = NULL; 290 pthread_mutex_t fragment_mutex = PTHREAD_MUTEX_INITIALIZER; 291 pthread_mutex_t pos_mutex = PTHREAD_MUTEX_INITIALIZER; 292 pthread_mutex_t dup_mutex = PTHREAD_MUTEX_INITIALIZER; 293 294 /* user options that control parallelisation */ 295 int processors = -1; 296 int bwriter_size; 297 298 /* compression operations */ 299 struct compressor *comp = NULL; 300 int compressor_opt_parsed = FALSE; 301 void *stream = NULL; 302 303 /* xattr stats */ 304 unsigned int xattr_bytes = 0, total_xattr_bytes = 0; 305 306 /* fragment to file mapping used when appending */ 307 int append_fragments = 0; 308 struct append_file **file_mapping; 309 310 /* root of the in-core directory structure */ 311 struct dir_info *root_dir; 312 313 static char *read_from_disk(long long start, unsigned int avail_bytes); 314 void add_old_root_entry(char *name, squashfs_inode inode, int inode_number, 315 int type); 316 struct file_info *duplicate(long long file_size, long long bytes, 317 unsigned int **block_list, long long *start, struct fragment **fragment, 318 struct file_buffer *file_buffer, int blocks, unsigned short checksum, 319 int checksum_flag); 320 struct dir_info *dir_scan1(char *, char *, struct pathnames *, 321 struct dir_ent *(_readdir)(struct dir_info *), int); 322 void dir_scan2(struct dir_info *dir, struct pseudo *pseudo); 323 void dir_scan3(struct dir_info *dir); 324 void dir_scan4(struct dir_info *dir); 325 void dir_scan5(struct dir_info *dir); 326 void dir_scan6(struct dir_info *dir); 327 void dir_scan7(squashfs_inode *inode, struct dir_info *dir_info); 328 struct file_info *add_non_dup(long long file_size, long long bytes, 329 unsigned int *block_list, long long start, struct fragment *fragment, 330 unsigned short checksum, unsigned short fragment_checksum, 331 int checksum_flag, int checksum_frag_flag); 332 long long generic_write_table(int, void *, int, void *, int); 333 void restorefs(); 334 struct dir_info *scan1_opendir(char *pathname, char *subpath, int depth); 335 void write_filesystem_tables(struct squashfs_super_block *sBlk, int nopad); 336 unsigned short get_checksum_mem(char *buff, int bytes); 337 void check_usable_phys_mem(int total_mem); 338 339 /* ANDROID CHANGES START*/ 340 #ifdef ANDROID 341 static int whitelisted(struct stat *buf); 342 static void add_whitelist_entry(char *filename, struct stat *buf); 343 static int add_whitelist(char *path); 344 static void process_whitelist_file(char *argv); 345 346 #define WHITELIST_SIZE 8192 347 int whitelist = 0; 348 349 struct whitelist_info { 350 dev_t st_dev; 351 ino_t st_ino; 352 }; 353 char *whitelist_filename = NULL; 354 struct whitelist_info *whitelist_paths = NULL; 355 #endif 356 /* ANDROID CHANGES END */ 357 358 void prep_exit() 359 { 360 if(restore_thread) { 361 if(pthread_self() == *restore_thread) { 362 /* 363 * Recursive failure when trying to restore filesystem! 364 * Nothing to do except to exit, otherwise we'll just 365 * appear to hang. The user should be able to restore 366 * from the recovery file (which is why it was added, in 367 * case of catastrophic failure in Mksquashfs) 368 */ 369 exit(1); 370 } else { 371 /* signal the restore thread to restore */ 372 pthread_kill(*restore_thread, SIGUSR1); 373 pthread_exit(NULL); 374 } 375 } else if(delete) { 376 if(destination_file && !block_device) 377 unlink(destination_file); 378 } else if(recovery_file) 379 unlink(recovery_file); 380 } 381 382 383 int add_overflow(int a, int b) 384 { 385 return (INT_MAX - a) < b; 386 } 387 388 389 int shift_overflow(int a, int shift) 390 { 391 return (INT_MAX >> shift) < a; 392 } 393 394 395 int multiply_overflow(int a, int multiplier) 396 { 397 return (INT_MAX / multiplier) < a; 398 } 399 400 401 int multiply_overflowll(long long a, int multiplier) 402 { 403 return (LLONG_MAX / multiplier) < a; 404 } 405 406 407 #define MKINODE(A) ((squashfs_inode)(((squashfs_inode) inode_bytes << 16) \ 408 + (((char *)A) - data_cache))) 409 410 411 void restorefs() 412 { 413 ERROR("Exiting - restoring original filesystem!\n\n"); 414 415 bytes = sbytes; 416 memcpy(data_cache, sdata_cache, cache_bytes = scache_bytes); 417 memcpy(directory_data_cache, sdirectory_data_cache, 418 sdirectory_cache_bytes); 419 directory_cache_bytes = sdirectory_cache_bytes; 420 inode_bytes = sinode_bytes; 421 directory_bytes = sdirectory_bytes; 422 memcpy(directory_table + directory_bytes, sdirectory_compressed, 423 sdirectory_compressed_bytes); 424 directory_bytes += sdirectory_compressed_bytes; 425 total_bytes = stotal_bytes; 426 total_inode_bytes = stotal_inode_bytes; 427 total_directory_bytes = stotal_directory_bytes; 428 inode_count = sinode_count; 429 file_count = sfile_count; 430 sym_count = ssym_count; 431 dev_count = sdev_count; 432 dir_count = sdir_count; 433 fifo_count = sfifo_count; 434 sock_count = ssock_count; 435 dup_files = sdup_files; 436 fragments = sfragments; 437 id_count = sid_count; 438 restore_xattrs(); 439 write_filesystem_tables(&sBlk, nopad); 440 exit(1); 441 } 442 443 444 void sighandler() 445 { 446 EXIT_MKSQUASHFS(); 447 } 448 449 450 int mangle2(void *strm, char *d, char *s, int size, 451 int block_size, int uncompressed, int data_block) 452 { 453 int error, c_byte = 0; 454 455 if(!uncompressed) { 456 c_byte = compressor_compress(comp, strm, d, s, size, block_size, 457 &error); 458 if(c_byte == -1) 459 BAD_ERROR("mangle2:: %s compress failed with error " 460 "code %d\n", comp->name, error); 461 } 462 463 if(c_byte == 0 || c_byte >= size || 464 (c_byte > (size * ((100.0 - compress_thresh_per) / 100.0)))) { 465 memcpy(d, s, size); 466 return size | (data_block ? SQUASHFS_COMPRESSED_BIT_BLOCK : 467 SQUASHFS_COMPRESSED_BIT); 468 } 469 470 return c_byte; 471 } 472 473 474 int mangle(char *d, char *s, int size, int block_size, 475 int uncompressed, int data_block) 476 { 477 return mangle2(stream, d, s, size, block_size, uncompressed, 478 data_block); 479 } 480 481 482 void *get_inode(int req_size) 483 { 484 int data_space; 485 unsigned short c_byte; 486 487 while(cache_bytes >= SQUASHFS_METADATA_SIZE) { 488 if((inode_size - inode_bytes) < 489 ((SQUASHFS_METADATA_SIZE << 1)) + 2) { 490 void *it = realloc(inode_table, inode_size + 491 (SQUASHFS_METADATA_SIZE << 1) + 2); 492 if(it == NULL) 493 MEM_ERROR(); 494 inode_table = it; 495 inode_size += (SQUASHFS_METADATA_SIZE << 1) + 2; 496 } 497 498 c_byte = mangle(inode_table + inode_bytes + BLOCK_OFFSET, 499 data_cache, SQUASHFS_METADATA_SIZE, 500 SQUASHFS_METADATA_SIZE, noI, 0); 501 TRACE("Inode block @ 0x%x, size %d\n", inode_bytes, c_byte); 502 SQUASHFS_SWAP_SHORTS(&c_byte, inode_table + inode_bytes, 1); 503 inode_bytes += SQUASHFS_COMPRESSED_SIZE(c_byte) + BLOCK_OFFSET; 504 total_inode_bytes += SQUASHFS_METADATA_SIZE + BLOCK_OFFSET; 505 memmove(data_cache, data_cache + SQUASHFS_METADATA_SIZE, 506 cache_bytes - SQUASHFS_METADATA_SIZE); 507 cache_bytes -= SQUASHFS_METADATA_SIZE; 508 } 509 510 data_space = (cache_size - cache_bytes); 511 if(data_space < req_size) { 512 int realloc_size = cache_size == 0 ? 513 ((req_size + SQUASHFS_METADATA_SIZE) & 514 ~(SQUASHFS_METADATA_SIZE - 1)) : req_size - 515 data_space; 516 517 void *dc = realloc(data_cache, cache_size + 518 realloc_size); 519 if(dc == NULL) 520 MEM_ERROR(); 521 cache_size += realloc_size; 522 data_cache = dc; 523 } 524 525 cache_bytes += req_size; 526 527 return data_cache + cache_bytes - req_size; 528 } 529 530 531 int read_bytes(int fd, void *buff, int bytes) 532 { 533 int res, count; 534 535 for(count = 0; count < bytes; count += res) { 536 res = read(fd, buff + count, bytes - count); 537 if(res < 1) { 538 if(res == 0) 539 goto bytes_read; 540 else if(errno != EINTR) { 541 ERROR("Read failed because %s\n", 542 strerror(errno)); 543 return -1; 544 } else 545 res = 0; 546 } 547 } 548 549 bytes_read: 550 return count; 551 } 552 553 554 int read_fs_bytes(int fd, long long byte, int bytes, void *buff) 555 { 556 off_t off = byte; 557 int res = 1; 558 559 TRACE("read_fs_bytes: reading from position 0x%llx, bytes %d\n", 560 byte, bytes); 561 562 pthread_cleanup_push((void *) pthread_mutex_unlock, &pos_mutex); 563 pthread_mutex_lock(&pos_mutex); 564 if(lseek(fd, off, SEEK_SET) == -1) { 565 ERROR("read_fs_bytes: Lseek on destination failed because %s, " 566 "offset=0x%llx\n", strerror(errno), off); 567 res = 0; 568 } else if(read_bytes(fd, buff, bytes) < bytes) { 569 ERROR("Read on destination failed\n"); 570 res = 0; 571 } 572 573 pthread_cleanup_pop(1); 574 return res; 575 } 576 577 578 int write_bytes(int fd, void *buff, int bytes) 579 { 580 int res, count; 581 582 for(count = 0; count < bytes; count += res) { 583 res = write(fd, buff + count, bytes - count); 584 if(res == -1) { 585 if(errno != EINTR) { 586 ERROR("Write failed because %s\n", 587 strerror(errno)); 588 return -1; 589 } 590 res = 0; 591 } 592 } 593 594 return 0; 595 } 596 597 598 void write_destination(int fd, long long byte, int bytes, void *buff) 599 { 600 off_t off = byte; 601 602 pthread_cleanup_push((void *) pthread_mutex_unlock, &pos_mutex); 603 pthread_mutex_lock(&pos_mutex); 604 605 if(lseek(fd, off, SEEK_SET) == -1) { 606 ERROR("write_destination: Lseek on destination " 607 "failed because %s, offset=0x%llx\n", strerror(errno), 608 off); 609 BAD_ERROR("Probably out of space on output %s\n", 610 block_device ? "block device" : "filesystem"); 611 } 612 613 if(write_bytes(fd, buff, bytes) == -1) 614 BAD_ERROR("Failed to write to output %s\n", 615 block_device ? "block device" : "filesystem"); 616 617 pthread_cleanup_pop(1); 618 } 619 620 621 long long write_inodes() 622 { 623 unsigned short c_byte; 624 int avail_bytes; 625 char *datap = data_cache; 626 long long start_bytes = bytes; 627 628 while(cache_bytes) { 629 if(inode_size - inode_bytes < 630 ((SQUASHFS_METADATA_SIZE << 1) + 2)) { 631 void *it = realloc(inode_table, inode_size + 632 ((SQUASHFS_METADATA_SIZE << 1) + 2)); 633 if(it == NULL) 634 MEM_ERROR(); 635 inode_size += (SQUASHFS_METADATA_SIZE << 1) + 2; 636 inode_table = it; 637 } 638 avail_bytes = cache_bytes > SQUASHFS_METADATA_SIZE ? 639 SQUASHFS_METADATA_SIZE : cache_bytes; 640 c_byte = mangle(inode_table + inode_bytes + BLOCK_OFFSET, datap, 641 avail_bytes, SQUASHFS_METADATA_SIZE, noI, 0); 642 TRACE("Inode block @ 0x%x, size %d\n", inode_bytes, c_byte); 643 SQUASHFS_SWAP_SHORTS(&c_byte, inode_table + inode_bytes, 1); 644 inode_bytes += SQUASHFS_COMPRESSED_SIZE(c_byte) + BLOCK_OFFSET; 645 total_inode_bytes += avail_bytes + BLOCK_OFFSET; 646 datap += avail_bytes; 647 cache_bytes -= avail_bytes; 648 } 649 650 write_destination(fd, bytes, inode_bytes, inode_table); 651 bytes += inode_bytes; 652 653 return start_bytes; 654 } 655 656 657 long long write_directories() 658 { 659 unsigned short c_byte; 660 int avail_bytes; 661 char *directoryp = directory_data_cache; 662 long long start_bytes = bytes; 663 664 while(directory_cache_bytes) { 665 if(directory_size - directory_bytes < 666 ((SQUASHFS_METADATA_SIZE << 1) + 2)) { 667 void *dt = realloc(directory_table, 668 directory_size + ((SQUASHFS_METADATA_SIZE << 1) 669 + 2)); 670 if(dt == NULL) 671 MEM_ERROR(); 672 directory_size += (SQUASHFS_METADATA_SIZE << 1) + 2; 673 directory_table = dt; 674 } 675 avail_bytes = directory_cache_bytes > SQUASHFS_METADATA_SIZE ? 676 SQUASHFS_METADATA_SIZE : directory_cache_bytes; 677 c_byte = mangle(directory_table + directory_bytes + 678 BLOCK_OFFSET, directoryp, avail_bytes, 679 SQUASHFS_METADATA_SIZE, noI, 0); 680 TRACE("Directory block @ 0x%x, size %d\n", directory_bytes, 681 c_byte); 682 SQUASHFS_SWAP_SHORTS(&c_byte, 683 directory_table + directory_bytes, 1); 684 directory_bytes += SQUASHFS_COMPRESSED_SIZE(c_byte) + 685 BLOCK_OFFSET; 686 total_directory_bytes += avail_bytes + BLOCK_OFFSET; 687 directoryp += avail_bytes; 688 directory_cache_bytes -= avail_bytes; 689 } 690 write_destination(fd, bytes, directory_bytes, directory_table); 691 bytes += directory_bytes; 692 693 return start_bytes; 694 } 695 696 697 long long write_id_table() 698 { 699 unsigned int id_bytes = SQUASHFS_ID_BYTES(id_count); 700 unsigned int p[id_count]; 701 int i; 702 703 TRACE("write_id_table: ids %d, id_bytes %d\n", id_count, id_bytes); 704 for(i = 0; i < id_count; i++) { 705 TRACE("write_id_table: id index %d, id %d", i, id_table[i]->id); 706 SQUASHFS_SWAP_INTS(&id_table[i]->id, p + i, 1); 707 } 708 709 return generic_write_table(id_bytes, p, 0, NULL, noI); 710 } 711 712 713 struct id *get_id(unsigned int id) 714 { 715 int hash = ID_HASH(id); 716 struct id *entry = id_hash_table[hash]; 717 718 for(; entry; entry = entry->next) 719 if(entry->id == id) 720 break; 721 722 return entry; 723 } 724 725 726 struct id *create_id(unsigned int id) 727 { 728 int hash = ID_HASH(id); 729 struct id *entry = malloc(sizeof(struct id)); 730 if(entry == NULL) 731 MEM_ERROR(); 732 entry->id = id; 733 entry->index = id_count ++; 734 entry->flags = 0; 735 entry->next = id_hash_table[hash]; 736 id_hash_table[hash] = entry; 737 id_table[entry->index] = entry; 738 return entry; 739 } 740 741 742 unsigned int get_uid(unsigned int uid) 743 { 744 struct id *entry = get_id(uid); 745 746 if(entry == NULL) { 747 if(id_count == SQUASHFS_IDS) 748 BAD_ERROR("Out of uids!\n"); 749 entry = create_id(uid); 750 } 751 752 if((entry->flags & ISA_UID) == 0) { 753 entry->flags |= ISA_UID; 754 uid_count ++; 755 } 756 757 return entry->index; 758 } 759 760 761 unsigned int get_guid(unsigned int guid) 762 { 763 struct id *entry = get_id(guid); 764 765 if(entry == NULL) { 766 if(id_count == SQUASHFS_IDS) 767 BAD_ERROR("Out of gids!\n"); 768 entry = create_id(guid); 769 } 770 771 if((entry->flags & ISA_GID) == 0) { 772 entry->flags |= ISA_GID; 773 guid_count ++; 774 } 775 776 return entry->index; 777 } 778 779 780 #define ALLOC_SIZE 128 781 782 char *_pathname(struct dir_ent *dir_ent, char *pathname, int *size) 783 { 784 if(pathname == NULL) { 785 pathname = malloc(ALLOC_SIZE); 786 if(pathname == NULL) 787 MEM_ERROR(); 788 } 789 790 for(;;) { 791 int res = snprintf(pathname, *size, "%s/%s", 792 dir_ent->our_dir->pathname, 793 dir_ent->source_name ? : dir_ent->name); 794 795 if(res < 0) 796 BAD_ERROR("snprintf failed in pathname\n"); 797 else if(res >= *size) { 798 /* 799 * pathname is too small to contain the result, so 800 * increase it and try again 801 */ 802 *size = (res + ALLOC_SIZE) & ~(ALLOC_SIZE - 1); 803 pathname = realloc(pathname, *size); 804 if(pathname == NULL) 805 MEM_ERROR(); 806 } else 807 break; 808 } 809 810 return pathname; 811 } 812 813 814 char *pathname(struct dir_ent *dir_ent) 815 { 816 static char *pathname = NULL; 817 static int size = ALLOC_SIZE; 818 819 if (dir_ent->nonstandard_pathname) 820 return dir_ent->nonstandard_pathname; 821 822 return pathname = _pathname(dir_ent, pathname, &size); 823 } 824 825 826 char *pathname_reader(struct dir_ent *dir_ent) 827 { 828 static char *pathname = NULL; 829 static int size = ALLOC_SIZE; 830 831 if (dir_ent->nonstandard_pathname) 832 return dir_ent->nonstandard_pathname; 833 834 return pathname = _pathname(dir_ent, pathname, &size); 835 } 836 837 838 char *subpathname(struct dir_ent *dir_ent) 839 { 840 static char *subpath = NULL; 841 static int size = ALLOC_SIZE; 842 int res; 843 844 if(subpath == NULL) { 845 subpath = malloc(ALLOC_SIZE); 846 if(subpath == NULL) 847 MEM_ERROR(); 848 } 849 850 for(;;) { 851 if(dir_ent->our_dir->subpath[0] != '\0') 852 res = snprintf(subpath, size, "%s/%s", 853 dir_ent->our_dir->subpath, dir_ent->name); 854 else 855 res = snprintf(subpath, size, "/%s", dir_ent->name); 856 857 if(res < 0) 858 BAD_ERROR("snprintf failed in subpathname\n"); 859 else if(res >= size) { 860 /* 861 * subpath is too small to contain the result, so 862 * increase it and try again 863 */ 864 size = (res + ALLOC_SIZE) & ~(ALLOC_SIZE - 1); 865 subpath = realloc(subpath, size); 866 if(subpath == NULL) 867 MEM_ERROR(); 868 } else 869 break; 870 } 871 872 return subpath; 873 } 874 875 876 static inline unsigned int get_inode_no(struct inode_info *inode) 877 { 878 return inode->inode_number; 879 } 880 881 882 static inline unsigned int get_parent_no(struct dir_info *dir) 883 { 884 return dir->depth ? get_inode_no(dir->dir_ent->inode) : inode_no; 885 } 886 887 888 /* ANDROID CHANGES START*/ 889 #ifdef ANDROID 890 891 /* Round up the passed |n| value to the smallest multiple of 4096 greater or 892 * equal than |n| and return the 4K-block number for that value. */ 893 static unsigned long long round_up_block(unsigned long long n) { 894 const unsigned long long kMapBlockSize = 4096; 895 return (n + kMapBlockSize - 1) / kMapBlockSize; 896 } 897 898 static inline void write_block_map_entry(char *sub_path, unsigned long long start_block, unsigned long long total_size, 899 char * mount_point, FILE *block_map_file) { 900 if (block_map_file) { 901 /* We assign each 4K block based on what file the first byte of the block 902 * belongs to. The current file consists of the chunk of bytes in the 903 * interval [start_block, start_block + total_size), (closed on the left end 904 * and open on the right end). We then compute the first block whose first 905 * byte is equal to or greater than start_block as |round_start| and then 906 * the first block whose first byte is *past* this interval, as 907 * |round_end + 1|. This means that the blocks that should be assigned to 908 * the current file are in the interval [round_start, round_end + 1), or 909 * simply [round_start, round_end]. 910 */ 911 unsigned long long round_start = round_up_block(start_block); 912 unsigned long long round_end = round_up_block(start_block + total_size) - 1; 913 if (round_start && total_size && round_start <= round_end) { 914 fprintf(block_map_file, "/%s", mount_point); 915 if (sub_path[0] != '/') fprintf(block_map_file, "/"); 916 if (round_start == round_end) 917 fprintf(block_map_file, "%s %lld\n", sub_path, round_start); 918 else 919 fprintf(block_map_file, "%s %lld-%lld\n", sub_path, round_start, round_end); 920 } 921 } 922 } 923 #endif 924 /* ANDROID CHANGES END */ 925 926 int create_inode(squashfs_inode *i_no, struct dir_info *dir_info, 927 struct dir_ent *dir_ent, int type, long long byte_size, 928 long long start_block, unsigned int offset, unsigned int *block_list, 929 struct fragment *fragment, struct directory *dir_in, long long sparse) 930 { 931 struct stat *buf = &dir_ent->inode->buf; 932 union squashfs_inode_header inode_header; 933 struct squashfs_base_inode_header *base = &inode_header.base; 934 void *inode; 935 char *filename = pathname(dir_ent); 936 int nlink = dir_ent->inode->nlink; 937 int xattr = read_xattrs(dir_ent); 938 939 switch(type) { 940 case SQUASHFS_FILE_TYPE: 941 if(dir_ent->inode->nlink > 1 || 942 byte_size >= (1LL << 32) || 943 start_block >= (1LL << 32) || 944 sparse || IS_XATTR(xattr)) 945 type = SQUASHFS_LREG_TYPE; 946 break; 947 case SQUASHFS_DIR_TYPE: 948 if(dir_info->dir_is_ldir || IS_XATTR(xattr)) 949 type = SQUASHFS_LDIR_TYPE; 950 break; 951 case SQUASHFS_SYMLINK_TYPE: 952 if(IS_XATTR(xattr)) 953 type = SQUASHFS_LSYMLINK_TYPE; 954 break; 955 case SQUASHFS_BLKDEV_TYPE: 956 if(IS_XATTR(xattr)) 957 type = SQUASHFS_LBLKDEV_TYPE; 958 break; 959 case SQUASHFS_CHRDEV_TYPE: 960 if(IS_XATTR(xattr)) 961 type = SQUASHFS_LCHRDEV_TYPE; 962 break; 963 case SQUASHFS_FIFO_TYPE: 964 if(IS_XATTR(xattr)) 965 type = SQUASHFS_LFIFO_TYPE; 966 break; 967 case SQUASHFS_SOCKET_TYPE: 968 if(IS_XATTR(xattr)) 969 type = SQUASHFS_LSOCKET_TYPE; 970 break; 971 } 972 973 base->mode = SQUASHFS_MODE(buf->st_mode); 974 base->uid = get_uid((unsigned int) global_uid == -1 ? 975 buf->st_uid : global_uid); 976 base->inode_type = type; 977 base->guid = get_guid((unsigned int) global_gid == -1 ? 978 buf->st_gid : global_gid); 979 base->mtime = buf->st_mtime; 980 base->inode_number = get_inode_no(dir_ent->inode); 981 982 if(type == SQUASHFS_FILE_TYPE) { 983 int i; 984 struct squashfs_reg_inode_header *reg = &inode_header.reg; 985 size_t off = offsetof(struct squashfs_reg_inode_header, block_list); 986 /* ANDROID CHANGES START*/ 987 #ifdef ANDROID 988 unsigned long long total_size = 0; 989 char *sub_path; 990 #endif 991 /* ANDROID CHANGES END */ 992 993 inode = get_inode(sizeof(*reg) + offset * sizeof(unsigned int)); 994 reg->file_size = byte_size; 995 reg->start_block = start_block; 996 reg->fragment = fragment->index; 997 reg->offset = fragment->offset; 998 SQUASHFS_SWAP_REG_INODE_HEADER(reg, inode); 999 SQUASHFS_SWAP_INTS(block_list, inode + off, offset); 1000 TRACE("File inode, file_size %lld, start_block 0x%llx, blocks " 1001 "%d, fragment %d, offset %d, size %d\n", byte_size, 1002 start_block, offset, fragment->index, fragment->offset, 1003 fragment->size); 1004 for(i = 0; i < offset; i++) { 1005 TRACE("Block %d, size %d\n", i, block_list[i]); 1006 total_size += SQUASHFS_COMPRESSED_SIZE_BLOCK(block_list[i]); 1007 } 1008 /* ANDROID CHANGES START*/ 1009 #ifdef ANDROID 1010 sub_path = subpathname(dir_ent); 1011 if (block_map_file && fragment->index == -1) { 1012 write_block_map_entry(sub_path, start_block, total_size, mount_point, block_map_file); 1013 } 1014 #endif 1015 /* ANDROID CHANGES END */ 1016 } 1017 else if(type == SQUASHFS_LREG_TYPE) { 1018 /* ANDROID CHANGES START*/ 1019 #ifdef ANDROID 1020 unsigned long long total_size = 0; 1021 char *sub_path; 1022 #endif 1023 /* ANDROID CHANGES END */ 1024 int i; 1025 struct squashfs_lreg_inode_header *reg = &inode_header.lreg; 1026 size_t off = offsetof(struct squashfs_lreg_inode_header, block_list); 1027 1028 inode = get_inode(sizeof(*reg) + offset * sizeof(unsigned int)); 1029 reg->nlink = nlink; 1030 reg->file_size = byte_size; 1031 reg->start_block = start_block; 1032 reg->fragment = fragment->index; 1033 reg->offset = fragment->offset; 1034 if(sparse && sparse >= byte_size) 1035 sparse = byte_size - 1; 1036 reg->sparse = sparse; 1037 reg->xattr = xattr; 1038 SQUASHFS_SWAP_LREG_INODE_HEADER(reg, inode); 1039 SQUASHFS_SWAP_INTS(block_list, inode + off, offset); 1040 TRACE("Long file inode, file_size %lld, start_block 0x%llx, " 1041 "blocks %d, fragment %d, offset %d, size %d, nlink %d" 1042 "\n", byte_size, start_block, offset, fragment->index, 1043 fragment->offset, fragment->size, nlink); 1044 for(i = 0; i < offset; i++) { 1045 TRACE("Block %d, size %d\n", i, block_list[i]); 1046 total_size += SQUASHFS_COMPRESSED_SIZE_BLOCK(block_list[i]); 1047 } 1048 /* ANDROID CHANGES START*/ 1049 #ifdef ANDROID 1050 sub_path = subpathname(dir_ent); 1051 if (block_map_file && fragment->index == -1) { 1052 write_block_map_entry(sub_path, start_block, total_size, mount_point, block_map_file); 1053 } 1054 #endif 1055 /* ANDROID CHANGES END */ 1056 } 1057 else if(type == SQUASHFS_LDIR_TYPE) { 1058 int i; 1059 unsigned char *p; 1060 struct squashfs_ldir_inode_header *dir = &inode_header.ldir; 1061 struct cached_dir_index *index = dir_in->index; 1062 unsigned int i_count = dir_in->i_count; 1063 unsigned int i_size = dir_in->i_size; 1064 1065 if(byte_size >= 1 << 27) 1066 BAD_ERROR("directory greater than 2^27-1 bytes!\n"); 1067 1068 inode = get_inode(sizeof(*dir) + i_size); 1069 dir->inode_type = SQUASHFS_LDIR_TYPE; 1070 dir->nlink = dir_ent->dir->directory_count + 2; 1071 dir->file_size = byte_size; 1072 dir->offset = offset; 1073 dir->start_block = start_block; 1074 dir->i_count = i_count; 1075 dir->parent_inode = get_parent_no(dir_ent->our_dir); 1076 dir->xattr = xattr; 1077 1078 SQUASHFS_SWAP_LDIR_INODE_HEADER(dir, inode); 1079 p = inode + offsetof(struct squashfs_ldir_inode_header, index); 1080 for(i = 0; i < i_count; i++) { 1081 SQUASHFS_SWAP_DIR_INDEX(&index[i].index, p); 1082 p += offsetof(struct squashfs_dir_index, name); 1083 memcpy(p, index[i].name, index[i].index.size + 1); 1084 p += index[i].index.size + 1; 1085 } 1086 TRACE("Long directory inode, file_size %lld, start_block " 1087 "0x%llx, offset 0x%x, nlink %d\n", byte_size, 1088 start_block, offset, dir_ent->dir->directory_count + 2); 1089 } 1090 else if(type == SQUASHFS_DIR_TYPE) { 1091 struct squashfs_dir_inode_header *dir = &inode_header.dir; 1092 1093 inode = get_inode(sizeof(*dir)); 1094 dir->nlink = dir_ent->dir->directory_count + 2; 1095 dir->file_size = byte_size; 1096 dir->offset = offset; 1097 dir->start_block = start_block; 1098 dir->parent_inode = get_parent_no(dir_ent->our_dir); 1099 SQUASHFS_SWAP_DIR_INODE_HEADER(dir, inode); 1100 TRACE("Directory inode, file_size %lld, start_block 0x%llx, " 1101 "offset 0x%x, nlink %d\n", byte_size, start_block, 1102 offset, dir_ent->dir->directory_count + 2); 1103 } 1104 else if(type == SQUASHFS_CHRDEV_TYPE || type == SQUASHFS_BLKDEV_TYPE) { 1105 struct squashfs_dev_inode_header *dev = &inode_header.dev; 1106 unsigned int major = major(buf->st_rdev); 1107 unsigned int minor = minor(buf->st_rdev); 1108 1109 if(major > 0xfff) { 1110 ERROR("Major %d out of range in device node %s, " 1111 "truncating to %d\n", major, filename, 1112 major & 0xfff); 1113 major &= 0xfff; 1114 } 1115 if(minor > 0xfffff) { 1116 ERROR("Minor %d out of range in device node %s, " 1117 "truncating to %d\n", minor, filename, 1118 minor & 0xfffff); 1119 minor &= 0xfffff; 1120 } 1121 inode = get_inode(sizeof(*dev)); 1122 dev->nlink = nlink; 1123 dev->rdev = (major << 8) | (minor & 0xff) | 1124 ((minor & ~0xff) << 12); 1125 SQUASHFS_SWAP_DEV_INODE_HEADER(dev, inode); 1126 TRACE("Device inode, rdev 0x%x, nlink %d\n", dev->rdev, nlink); 1127 } 1128 else if(type == SQUASHFS_LCHRDEV_TYPE || type == SQUASHFS_LBLKDEV_TYPE) { 1129 struct squashfs_ldev_inode_header *dev = &inode_header.ldev; 1130 unsigned int major = major(buf->st_rdev); 1131 unsigned int minor = minor(buf->st_rdev); 1132 1133 if(major > 0xfff) { 1134 ERROR("Major %d out of range in device node %s, " 1135 "truncating to %d\n", major, filename, 1136 major & 0xfff); 1137 major &= 0xfff; 1138 } 1139 if(minor > 0xfffff) { 1140 ERROR("Minor %d out of range in device node %s, " 1141 "truncating to %d\n", minor, filename, 1142 minor & 0xfffff); 1143 minor &= 0xfffff; 1144 } 1145 inode = get_inode(sizeof(*dev)); 1146 dev->nlink = nlink; 1147 dev->rdev = (major << 8) | (minor & 0xff) | 1148 ((minor & ~0xff) << 12); 1149 dev->xattr = xattr; 1150 SQUASHFS_SWAP_LDEV_INODE_HEADER(dev, inode); 1151 TRACE("Device inode, rdev 0x%x, nlink %d\n", dev->rdev, nlink); 1152 } 1153 else if(type == SQUASHFS_SYMLINK_TYPE) { 1154 struct squashfs_symlink_inode_header *symlink = &inode_header.symlink; 1155 int byte = strlen(dir_ent->inode->symlink); 1156 size_t off = offsetof(struct squashfs_symlink_inode_header, symlink); 1157 1158 inode = get_inode(sizeof(*symlink) + byte); 1159 symlink->nlink = nlink; 1160 symlink->symlink_size = byte; 1161 SQUASHFS_SWAP_SYMLINK_INODE_HEADER(symlink, inode); 1162 strncpy(inode + off, dir_ent->inode->symlink, byte); 1163 TRACE("Symbolic link inode, symlink_size %d, nlink %d\n", byte, 1164 nlink); 1165 } 1166 else if(type == SQUASHFS_LSYMLINK_TYPE) { 1167 struct squashfs_symlink_inode_header *symlink = &inode_header.symlink; 1168 int byte = strlen(dir_ent->inode->symlink); 1169 size_t off = offsetof(struct squashfs_symlink_inode_header, symlink); 1170 1171 inode = get_inode(sizeof(*symlink) + byte + 1172 sizeof(unsigned int)); 1173 symlink->nlink = nlink; 1174 symlink->symlink_size = byte; 1175 SQUASHFS_SWAP_SYMLINK_INODE_HEADER(symlink, inode); 1176 strncpy(inode + off, dir_ent->inode->symlink, byte); 1177 SQUASHFS_SWAP_INTS(&xattr, inode + off + byte, 1); 1178 TRACE("Symbolic link inode, symlink_size %d, nlink %d\n", byte, 1179 nlink); 1180 } 1181 else if(type == SQUASHFS_FIFO_TYPE || type == SQUASHFS_SOCKET_TYPE) { 1182 struct squashfs_ipc_inode_header *ipc = &inode_header.ipc; 1183 1184 inode = get_inode(sizeof(*ipc)); 1185 ipc->nlink = nlink; 1186 SQUASHFS_SWAP_IPC_INODE_HEADER(ipc, inode); 1187 TRACE("ipc inode, type %s, nlink %d\n", type == 1188 SQUASHFS_FIFO_TYPE ? "fifo" : "socket", nlink); 1189 } 1190 else if(type == SQUASHFS_LFIFO_TYPE || type == SQUASHFS_LSOCKET_TYPE) { 1191 struct squashfs_lipc_inode_header *ipc = &inode_header.lipc; 1192 1193 inode = get_inode(sizeof(*ipc)); 1194 ipc->nlink = nlink; 1195 ipc->xattr = xattr; 1196 SQUASHFS_SWAP_LIPC_INODE_HEADER(ipc, inode); 1197 TRACE("ipc inode, type %s, nlink %d\n", type == 1198 SQUASHFS_FIFO_TYPE ? "fifo" : "socket", nlink); 1199 } else 1200 BAD_ERROR("Unrecognised inode %d in create_inode\n", type); 1201 1202 *i_no = MKINODE(inode); 1203 inode_count ++; 1204 1205 TRACE("Created inode 0x%llx, type %d, uid %d, guid %d\n", *i_no, type, 1206 base->uid, base->guid); 1207 1208 return TRUE; 1209 } 1210 1211 1212 void add_dir(squashfs_inode inode, unsigned int inode_number, char *name, 1213 int type, struct directory *dir) 1214 { 1215 unsigned char *buff; 1216 struct squashfs_dir_entry idir; 1217 unsigned int start_block = inode >> 16; 1218 unsigned int offset = inode & 0xffff; 1219 unsigned int size = strlen(name); 1220 size_t name_off = offsetof(struct squashfs_dir_entry, name); 1221 1222 if(size > SQUASHFS_NAME_LEN) { 1223 size = SQUASHFS_NAME_LEN; 1224 ERROR("Filename is greater than %d characters, truncating! ..." 1225 "\n", SQUASHFS_NAME_LEN); 1226 } 1227 1228 if(dir->p + sizeof(struct squashfs_dir_entry) + size + 1229 sizeof(struct squashfs_dir_header) 1230 >= dir->buff + dir->size) { 1231 buff = realloc(dir->buff, dir->size += SQUASHFS_METADATA_SIZE); 1232 if(buff == NULL) 1233 MEM_ERROR(); 1234 1235 dir->p = (dir->p - dir->buff) + buff; 1236 if(dir->entry_count_p) 1237 dir->entry_count_p = (dir->entry_count_p - dir->buff + 1238 buff); 1239 dir->index_count_p = dir->index_count_p - dir->buff + buff; 1240 dir->buff = buff; 1241 } 1242 1243 if(dir->entry_count == 256 || start_block != dir->start_block || 1244 ((dir->entry_count_p != NULL) && 1245 ((dir->p + sizeof(struct squashfs_dir_entry) + size - 1246 dir->index_count_p) > SQUASHFS_METADATA_SIZE)) || 1247 ((long long) inode_number - dir->inode_number) > 32767 1248 || ((long long) inode_number - dir->inode_number) 1249 < -32768) { 1250 if(dir->entry_count_p) { 1251 struct squashfs_dir_header dir_header; 1252 1253 if((dir->p + sizeof(struct squashfs_dir_entry) + size - 1254 dir->index_count_p) > 1255 SQUASHFS_METADATA_SIZE) { 1256 if(dir->i_count % I_COUNT_SIZE == 0) { 1257 dir->index = realloc(dir->index, 1258 (dir->i_count + I_COUNT_SIZE) * 1259 sizeof(struct cached_dir_index)); 1260 if(dir->index == NULL) 1261 MEM_ERROR(); 1262 } 1263 dir->index[dir->i_count].index.index = 1264 dir->p - dir->buff; 1265 dir->index[dir->i_count].index.size = size - 1; 1266 dir->index[dir->i_count++].name = name; 1267 dir->i_size += sizeof(struct squashfs_dir_index) 1268 + size; 1269 dir->index_count_p = dir->p; 1270 } 1271 1272 dir_header.count = dir->entry_count - 1; 1273 dir_header.start_block = dir->start_block; 1274 dir_header.inode_number = dir->inode_number; 1275 SQUASHFS_SWAP_DIR_HEADER(&dir_header, 1276 dir->entry_count_p); 1277 1278 } 1279 1280 1281 dir->entry_count_p = dir->p; 1282 dir->start_block = start_block; 1283 dir->entry_count = 0; 1284 dir->inode_number = inode_number; 1285 dir->p += sizeof(struct squashfs_dir_header); 1286 } 1287 1288 idir.offset = offset; 1289 idir.type = type; 1290 idir.size = size - 1; 1291 idir.inode_number = ((long long) inode_number - dir->inode_number); 1292 SQUASHFS_SWAP_DIR_ENTRY(&idir, dir->p); 1293 strncpy((char *) dir->p + name_off, name, size); 1294 dir->p += sizeof(struct squashfs_dir_entry) + size; 1295 dir->entry_count ++; 1296 } 1297 1298 1299 void write_dir(squashfs_inode *inode, struct dir_info *dir_info, 1300 struct directory *dir) 1301 { 1302 unsigned int dir_size = dir->p - dir->buff; 1303 int data_space = directory_cache_size - directory_cache_bytes; 1304 unsigned int directory_block, directory_offset, i_count, index; 1305 unsigned short c_byte; 1306 1307 if(data_space < dir_size) { 1308 int realloc_size = directory_cache_size == 0 ? 1309 ((dir_size + SQUASHFS_METADATA_SIZE) & 1310 ~(SQUASHFS_METADATA_SIZE - 1)) : dir_size - data_space; 1311 1312 void *dc = realloc(directory_data_cache, 1313 directory_cache_size + realloc_size); 1314 if(dc == NULL) 1315 MEM_ERROR(); 1316 directory_cache_size += realloc_size; 1317 directory_data_cache = dc; 1318 } 1319 1320 if(dir_size) { 1321 struct squashfs_dir_header dir_header; 1322 1323 dir_header.count = dir->entry_count - 1; 1324 dir_header.start_block = dir->start_block; 1325 dir_header.inode_number = dir->inode_number; 1326 SQUASHFS_SWAP_DIR_HEADER(&dir_header, dir->entry_count_p); 1327 memcpy(directory_data_cache + directory_cache_bytes, dir->buff, 1328 dir_size); 1329 } 1330 directory_offset = directory_cache_bytes; 1331 directory_block = directory_bytes; 1332 directory_cache_bytes += dir_size; 1333 i_count = 0; 1334 index = SQUASHFS_METADATA_SIZE - directory_offset; 1335 1336 while(1) { 1337 while(i_count < dir->i_count && 1338 dir->index[i_count].index.index < index) 1339 dir->index[i_count++].index.start_block = 1340 directory_bytes; 1341 index += SQUASHFS_METADATA_SIZE; 1342 1343 if(directory_cache_bytes < SQUASHFS_METADATA_SIZE) 1344 break; 1345 1346 if((directory_size - directory_bytes) < 1347 ((SQUASHFS_METADATA_SIZE << 1) + 2)) { 1348 void *dt = realloc(directory_table, 1349 directory_size + (SQUASHFS_METADATA_SIZE << 1) 1350 + 2); 1351 if(dt == NULL) 1352 MEM_ERROR(); 1353 directory_size += SQUASHFS_METADATA_SIZE << 1; 1354 directory_table = dt; 1355 } 1356 1357 c_byte = mangle(directory_table + directory_bytes + 1358 BLOCK_OFFSET, directory_data_cache, 1359 SQUASHFS_METADATA_SIZE, SQUASHFS_METADATA_SIZE, 1360 noI, 0); 1361 TRACE("Directory block @ 0x%x, size %d\n", directory_bytes, 1362 c_byte); 1363 SQUASHFS_SWAP_SHORTS(&c_byte, 1364 directory_table + directory_bytes, 1); 1365 directory_bytes += SQUASHFS_COMPRESSED_SIZE(c_byte) + 1366 BLOCK_OFFSET; 1367 total_directory_bytes += SQUASHFS_METADATA_SIZE + BLOCK_OFFSET; 1368 memmove(directory_data_cache, directory_data_cache + 1369 SQUASHFS_METADATA_SIZE, directory_cache_bytes - 1370 SQUASHFS_METADATA_SIZE); 1371 directory_cache_bytes -= SQUASHFS_METADATA_SIZE; 1372 } 1373 1374 create_inode(inode, dir_info, dir_info->dir_ent, SQUASHFS_DIR_TYPE, 1375 dir_size + 3, directory_block, directory_offset, NULL, NULL, 1376 dir, 0); 1377 1378 #ifdef SQUASHFS_TRACE 1379 { 1380 unsigned char *dirp; 1381 int count; 1382 1383 TRACE("Directory contents of inode 0x%llx\n", *inode); 1384 dirp = dir->buff; 1385 while(dirp < dir->p) { 1386 char buffer[SQUASHFS_NAME_LEN + 1]; 1387 struct squashfs_dir_entry idir, *idirp; 1388 struct squashfs_dir_header dirh; 1389 SQUASHFS_SWAP_DIR_HEADER((struct squashfs_dir_header *) dirp, 1390 &dirh); 1391 count = dirh.count + 1; 1392 dirp += sizeof(struct squashfs_dir_header); 1393 1394 TRACE("\tStart block 0x%x, count %d\n", 1395 dirh.start_block, count); 1396 1397 while(count--) { 1398 idirp = (struct squashfs_dir_entry *) dirp; 1399 SQUASHFS_SWAP_DIR_ENTRY(idirp, &idir); 1400 strncpy(buffer, idirp->name, idir.size + 1); 1401 buffer[idir.size + 1] = '\0'; 1402 TRACE("\t\tname %s, inode offset 0x%x, type " 1403 "%d\n", buffer, idir.offset, idir.type); 1404 dirp += sizeof(struct squashfs_dir_entry) + idir.size + 1405 1; 1406 } 1407 } 1408 } 1409 #endif 1410 dir_count ++; 1411 } 1412 1413 1414 static struct file_buffer *get_fragment(struct fragment *fragment) 1415 { 1416 struct squashfs_fragment_entry *disk_fragment; 1417 struct file_buffer *buffer, *compressed_buffer; 1418 long long start_block; 1419 int res, size, index = fragment->index; 1420 char locked; 1421 1422 /* 1423 * Lookup fragment block in cache. 1424 * If the fragment block doesn't exist, then get the compressed version 1425 * from the writer cache or off disk, and decompress it. 1426 * 1427 * This routine has two things which complicate the code: 1428 * 1429 * 1. Multiple threads can simultaneously lookup/create the 1430 * same buffer. This means a buffer needs to be "locked" 1431 * when it is being filled in, to prevent other threads from 1432 * using it when it is not ready. This is because we now do 1433 * fragment duplicate checking in parallel. 1434 * 2. We have two caches which need to be checked for the 1435 * presence of fragment blocks: the normal fragment cache 1436 * and a "reserve" cache. The reserve cache is used to 1437 * prevent an unnecessary pipeline stall when the fragment cache 1438 * is full of fragments waiting to be compressed. 1439 */ 1440 1441 if(fragment->index == SQUASHFS_INVALID_FRAG) 1442 return NULL; 1443 1444 pthread_cleanup_push((void *) pthread_mutex_unlock, &dup_mutex); 1445 pthread_mutex_lock(&dup_mutex); 1446 1447 again: 1448 buffer = cache_lookup_nowait(fragment_buffer, index, &locked); 1449 if(buffer) { 1450 pthread_mutex_unlock(&dup_mutex); 1451 if(locked) 1452 /* got a buffer being filled in. Wait for it */ 1453 cache_wait_unlock(buffer); 1454 goto finished; 1455 } 1456 1457 /* not in fragment cache, is it in the reserve cache? */ 1458 buffer = cache_lookup_nowait(reserve_cache, index, &locked); 1459 if(buffer) { 1460 pthread_mutex_unlock(&dup_mutex); 1461 if(locked) 1462 /* got a buffer being filled in. Wait for it */ 1463 cache_wait_unlock(buffer); 1464 goto finished; 1465 } 1466 1467 /* in neither cache, try to get it from the fragment cache */ 1468 buffer = cache_get_nowait(fragment_buffer, index); 1469 if(!buffer) { 1470 /* 1471 * no room, get it from the reserve cache, this is 1472 * dimensioned so it will always have space (no more than 1473 * processors + 1 can have an outstanding reserve buffer) 1474 */ 1475 buffer = cache_get_nowait(reserve_cache, index); 1476 if(!buffer) { 1477 /* failsafe */ 1478 ERROR("no space in reserve cache\n"); 1479 goto again; 1480 } 1481 } 1482 1483 pthread_mutex_unlock(&dup_mutex); 1484 1485 compressed_buffer = cache_lookup(fwriter_buffer, index); 1486 1487 pthread_cleanup_push((void *) pthread_mutex_unlock, &fragment_mutex); 1488 pthread_mutex_lock(&fragment_mutex); 1489 disk_fragment = &fragment_table[index]; 1490 size = SQUASHFS_COMPRESSED_SIZE_BLOCK(disk_fragment->size); 1491 start_block = disk_fragment->start_block; 1492 pthread_cleanup_pop(1); 1493 1494 if(SQUASHFS_COMPRESSED_BLOCK(disk_fragment->size)) { 1495 int error; 1496 char *data; 1497 1498 if(compressed_buffer) 1499 data = compressed_buffer->data; 1500 else { 1501 data = read_from_disk(start_block, size); 1502 if(data == NULL) { 1503 ERROR("Failed to read fragment from output" 1504 " filesystem\n"); 1505 BAD_ERROR("Output filesystem corrupted?\n"); 1506 } 1507 } 1508 1509 res = compressor_uncompress(comp, buffer->data, data, size, 1510 block_size, &error); 1511 if(res == -1) 1512 BAD_ERROR("%s uncompress failed with error code %d\n", 1513 comp->name, error); 1514 } else if(compressed_buffer) 1515 memcpy(buffer->data, compressed_buffer->data, size); 1516 else { 1517 res = read_fs_bytes(fd, start_block, size, buffer->data); 1518 if(res == 0) { 1519 ERROR("Failed to read fragment from output " 1520 "filesystem\n"); 1521 BAD_ERROR("Output filesystem corrupted?\n"); 1522 } 1523 } 1524 1525 cache_unlock(buffer); 1526 cache_block_put(compressed_buffer); 1527 1528 finished: 1529 pthread_cleanup_pop(0); 1530 1531 return buffer; 1532 } 1533 1534 1535 unsigned short get_fragment_checksum(struct file_info *file) 1536 { 1537 struct file_buffer *frag_buffer; 1538 struct append_file *append; 1539 int res, index = file->fragment->index; 1540 unsigned short checksum; 1541 1542 if(index == SQUASHFS_INVALID_FRAG) 1543 return 0; 1544 1545 pthread_cleanup_push((void *) pthread_mutex_unlock, &dup_mutex); 1546 pthread_mutex_lock(&dup_mutex); 1547 res = file->have_frag_checksum; 1548 checksum = file->fragment_checksum; 1549 pthread_cleanup_pop(1); 1550 1551 if(res) 1552 return checksum; 1553 1554 frag_buffer = get_fragment(file->fragment); 1555 1556 pthread_cleanup_push((void *) pthread_mutex_unlock, &dup_mutex); 1557 1558 for(append = file_mapping[index]; append; append = append->next) { 1559 int offset = append->file->fragment->offset; 1560 int size = append->file->fragment->size; 1561 unsigned short cksum = 1562 get_checksum_mem(frag_buffer->data + offset, size); 1563 1564 if(file == append->file) 1565 checksum = cksum; 1566 1567 pthread_mutex_lock(&dup_mutex); 1568 append->file->fragment_checksum = cksum; 1569 append->file->have_frag_checksum = TRUE; 1570 pthread_mutex_unlock(&dup_mutex); 1571 } 1572 1573 cache_block_put(frag_buffer); 1574 pthread_cleanup_pop(0); 1575 1576 return checksum; 1577 } 1578 1579 1580 void lock_fragments() 1581 { 1582 pthread_cleanup_push((void *) pthread_mutex_unlock, &fragment_mutex); 1583 pthread_mutex_lock(&fragment_mutex); 1584 fragments_locked = TRUE; 1585 pthread_cleanup_pop(1); 1586 } 1587 1588 1589 void unlock_fragments() 1590 { 1591 int frg, size; 1592 struct file_buffer *write_buffer; 1593 1594 pthread_cleanup_push((void *) pthread_mutex_unlock, &fragment_mutex); 1595 pthread_mutex_lock(&fragment_mutex); 1596 1597 /* 1598 * Note queue_empty() is inherently racy with respect to concurrent 1599 * queue get and pushes. We avoid this because we're holding the 1600 * fragment_mutex which ensures no other threads can be using the 1601 * queue at this time. 1602 */ 1603 while(!queue_empty(locked_fragment)) { 1604 write_buffer = queue_get(locked_fragment); 1605 frg = write_buffer->block; 1606 size = SQUASHFS_COMPRESSED_SIZE_BLOCK(fragment_table[frg].size); 1607 fragment_table[frg].start_block = bytes; 1608 write_buffer->block = bytes; 1609 bytes += size; 1610 fragments_outstanding --; 1611 queue_put(to_writer, write_buffer); 1612 TRACE("fragment_locked writing fragment %d, compressed size %d" 1613 "\n", frg, size); 1614 } 1615 fragments_locked = FALSE; 1616 pthread_cleanup_pop(1); 1617 } 1618 1619 /* Called with the fragment_mutex locked */ 1620 void add_pending_fragment(struct file_buffer *write_buffer, int c_byte, 1621 int fragment) 1622 { 1623 fragment_table[fragment].size = c_byte; 1624 write_buffer->block = fragment; 1625 1626 queue_put(locked_fragment, write_buffer); 1627 } 1628 1629 1630 void write_fragment(struct file_buffer *fragment) 1631 { 1632 if(fragment == NULL) 1633 return; 1634 1635 pthread_cleanup_push((void *) pthread_mutex_unlock, &fragment_mutex); 1636 pthread_mutex_lock(&fragment_mutex); 1637 fragment_table[fragment->block].unused = 0; 1638 fragments_outstanding ++; 1639 queue_put(to_frag, fragment); 1640 pthread_cleanup_pop(1); 1641 } 1642 1643 1644 struct file_buffer *allocate_fragment() 1645 { 1646 struct file_buffer *fragment = cache_get(fragment_buffer, fragments); 1647 1648 pthread_cleanup_push((void *) pthread_mutex_unlock, &fragment_mutex); 1649 pthread_mutex_lock(&fragment_mutex); 1650 1651 if(fragments % FRAG_SIZE == 0) { 1652 void *ft = realloc(fragment_table, (fragments + 1653 FRAG_SIZE) * sizeof(struct squashfs_fragment_entry)); 1654 if(ft == NULL) 1655 MEM_ERROR(); 1656 fragment_table = ft; 1657 } 1658 1659 fragment->size = 0; 1660 fragment->block = fragments ++; 1661 1662 pthread_cleanup_pop(1); 1663 1664 return fragment; 1665 } 1666 1667 1668 static struct fragment empty_fragment = {SQUASHFS_INVALID_FRAG, 0, 0}; 1669 1670 1671 void free_fragment(struct fragment *fragment) 1672 { 1673 if(fragment != &empty_fragment) 1674 free(fragment); 1675 } 1676 1677 1678 struct fragment *get_and_fill_fragment(struct file_buffer *file_buffer, 1679 struct dir_ent *dir_ent) 1680 { 1681 struct fragment *ffrg; 1682 struct file_buffer **fragment; 1683 1684 if(file_buffer == NULL || file_buffer->size == 0) 1685 return &empty_fragment; 1686 1687 fragment = eval_frag_actions(root_dir, dir_ent); 1688 1689 if((*fragment) && (*fragment)->size + file_buffer->size > block_size) { 1690 write_fragment(*fragment); 1691 *fragment = NULL; 1692 } 1693 1694 ffrg = malloc(sizeof(struct fragment)); 1695 if(ffrg == NULL) 1696 MEM_ERROR(); 1697 1698 if(*fragment == NULL) 1699 *fragment = allocate_fragment(); 1700 1701 ffrg->index = (*fragment)->block; 1702 ffrg->offset = (*fragment)->size; 1703 ffrg->size = file_buffer->size; 1704 memcpy((*fragment)->data + (*fragment)->size, file_buffer->data, 1705 file_buffer->size); 1706 (*fragment)->size += file_buffer->size; 1707 1708 return ffrg; 1709 } 1710 1711 1712 long long generic_write_table(int length, void *buffer, int length2, 1713 void *buffer2, int uncompressed) 1714 { 1715 int meta_blocks = (length + SQUASHFS_METADATA_SIZE - 1) / 1716 SQUASHFS_METADATA_SIZE; 1717 long long *list, start_bytes; 1718 int compressed_size, i, list_size = meta_blocks * sizeof(long long); 1719 unsigned short c_byte; 1720 char cbuffer[(SQUASHFS_METADATA_SIZE << 2) + 2]; 1721 1722 #ifdef SQUASHFS_TRACE 1723 long long obytes = bytes; 1724 int olength = length; 1725 #endif 1726 1727 list = malloc(list_size); 1728 if(list == NULL) 1729 MEM_ERROR(); 1730 1731 for(i = 0; i < meta_blocks; i++) { 1732 int avail_bytes = length > SQUASHFS_METADATA_SIZE ? 1733 SQUASHFS_METADATA_SIZE : length; 1734 c_byte = mangle(cbuffer + BLOCK_OFFSET, buffer + i * 1735 SQUASHFS_METADATA_SIZE , avail_bytes, 1736 SQUASHFS_METADATA_SIZE, uncompressed, 0); 1737 SQUASHFS_SWAP_SHORTS(&c_byte, cbuffer, 1); 1738 list[i] = bytes; 1739 compressed_size = SQUASHFS_COMPRESSED_SIZE(c_byte) + 1740 BLOCK_OFFSET; 1741 TRACE("block %d @ 0x%llx, compressed size %d\n", i, bytes, 1742 compressed_size); 1743 write_destination(fd, bytes, compressed_size, cbuffer); 1744 bytes += compressed_size; 1745 total_bytes += avail_bytes; 1746 length -= avail_bytes; 1747 } 1748 1749 start_bytes = bytes; 1750 if(length2) { 1751 write_destination(fd, bytes, length2, buffer2); 1752 bytes += length2; 1753 total_bytes += length2; 1754 } 1755 1756 SQUASHFS_INSWAP_LONG_LONGS(list, meta_blocks); 1757 write_destination(fd, bytes, list_size, list); 1758 bytes += list_size; 1759 total_bytes += list_size; 1760 1761 TRACE("generic_write_table: total uncompressed %d compressed %lld\n", 1762 olength, bytes - obytes); 1763 1764 free(list); 1765 1766 return start_bytes; 1767 } 1768 1769 1770 long long write_fragment_table() 1771 { 1772 unsigned int frag_bytes = SQUASHFS_FRAGMENT_BYTES(fragments); 1773 int i; 1774 1775 TRACE("write_fragment_table: fragments %d, frag_bytes %d\n", fragments, 1776 frag_bytes); 1777 for(i = 0; i < fragments; i++) { 1778 TRACE("write_fragment_table: fragment %d, start_block 0x%llx, " 1779 "size %d\n", i, fragment_table[i].start_block, 1780 fragment_table[i].size); 1781 SQUASHFS_INSWAP_FRAGMENT_ENTRY(&fragment_table[i]); 1782 } 1783 1784 return generic_write_table(frag_bytes, fragment_table, 0, NULL, noF); 1785 } 1786 1787 1788 char read_from_file_buffer[SQUASHFS_FILE_MAX_SIZE]; 1789 static char *read_from_disk(long long start, unsigned int avail_bytes) 1790 { 1791 int res; 1792 1793 res = read_fs_bytes(fd, start, avail_bytes, read_from_file_buffer); 1794 if(res == 0) 1795 return NULL; 1796 1797 return read_from_file_buffer; 1798 } 1799 1800 1801 char read_from_file_buffer2[SQUASHFS_FILE_MAX_SIZE]; 1802 char *read_from_disk2(long long start, unsigned int avail_bytes) 1803 { 1804 int res; 1805 1806 res = read_fs_bytes(fd, start, avail_bytes, read_from_file_buffer2); 1807 if(res == 0) 1808 return NULL; 1809 1810 return read_from_file_buffer2; 1811 } 1812 1813 1814 /* 1815 * Compute 16 bit BSD checksum over the data 1816 */ 1817 unsigned short get_checksum(char *buff, int bytes, unsigned short chksum) 1818 { 1819 unsigned char *b = (unsigned char *) buff; 1820 1821 while(bytes --) { 1822 chksum = (chksum & 1) ? (chksum >> 1) | 0x8000 : chksum >> 1; 1823 chksum += *b++; 1824 } 1825 1826 return chksum; 1827 } 1828 1829 1830 unsigned short get_checksum_disk(long long start, long long l, 1831 unsigned int *blocks) 1832 { 1833 unsigned short chksum = 0; 1834 unsigned int bytes; 1835 struct file_buffer *write_buffer; 1836 int i; 1837 1838 for(i = 0; l; i++) { 1839 bytes = SQUASHFS_COMPRESSED_SIZE_BLOCK(blocks[i]); 1840 if(bytes == 0) /* sparse block */ 1841 continue; 1842 write_buffer = cache_lookup(bwriter_buffer, start); 1843 if(write_buffer) { 1844 chksum = get_checksum(write_buffer->data, bytes, 1845 chksum); 1846 cache_block_put(write_buffer); 1847 } else { 1848 void *data = read_from_disk(start, bytes); 1849 if(data == NULL) { 1850 ERROR("Failed to checksum data from output" 1851 " filesystem\n"); 1852 BAD_ERROR("Output filesystem corrupted?\n"); 1853 } 1854 1855 chksum = get_checksum(data, bytes, chksum); 1856 } 1857 1858 l -= bytes; 1859 start += bytes; 1860 } 1861 1862 return chksum; 1863 } 1864 1865 1866 unsigned short get_checksum_mem(char *buff, int bytes) 1867 { 1868 return get_checksum(buff, bytes, 0); 1869 } 1870 1871 1872 unsigned short get_checksum_mem_buffer(struct file_buffer *file_buffer) 1873 { 1874 if(file_buffer == NULL) 1875 return 0; 1876 else 1877 return get_checksum(file_buffer->data, file_buffer->size, 0); 1878 } 1879 1880 1881 #define DUP_HASH(a) (a & 0xffff) 1882 void add_file(long long start, long long file_size, long long file_bytes, 1883 unsigned int *block_listp, int blocks, unsigned int fragment, 1884 int offset, int bytes) 1885 { 1886 struct fragment *frg; 1887 unsigned int *block_list = block_listp; 1888 struct file_info *dupl_ptr = dupl[DUP_HASH(file_size)]; 1889 struct append_file *append_file; 1890 struct file_info *file; 1891 1892 if(!duplicate_checking || file_size == 0) 1893 return; 1894 1895 for(; dupl_ptr; dupl_ptr = dupl_ptr->next) { 1896 if(file_size != dupl_ptr->file_size) 1897 continue; 1898 if(blocks != 0 && start != dupl_ptr->start) 1899 continue; 1900 if(fragment != dupl_ptr->fragment->index) 1901 continue; 1902 if(fragment != SQUASHFS_INVALID_FRAG && (offset != 1903 dupl_ptr->fragment->offset || bytes != 1904 dupl_ptr->fragment->size)) 1905 continue; 1906 return; 1907 } 1908 1909 frg = malloc(sizeof(struct fragment)); 1910 if(frg == NULL) 1911 MEM_ERROR(); 1912 1913 frg->index = fragment; 1914 frg->offset = offset; 1915 frg->size = bytes; 1916 1917 file = add_non_dup(file_size, file_bytes, block_list, start, frg, 0, 0, 1918 FALSE, FALSE); 1919 1920 if(fragment == SQUASHFS_INVALID_FRAG) 1921 return; 1922 1923 append_file = malloc(sizeof(struct append_file)); 1924 if(append_file == NULL) 1925 MEM_ERROR(); 1926 1927 append_file->file = file; 1928 append_file->next = file_mapping[fragment]; 1929 file_mapping[fragment] = append_file; 1930 } 1931 1932 1933 int pre_duplicate(long long file_size) 1934 { 1935 struct file_info *dupl_ptr = dupl[DUP_HASH(file_size)]; 1936 1937 for(; dupl_ptr; dupl_ptr = dupl_ptr->next) 1938 if(dupl_ptr->file_size == file_size) 1939 return TRUE; 1940 1941 return FALSE; 1942 } 1943 1944 1945 struct file_info *add_non_dup(long long file_size, long long bytes, 1946 unsigned int *block_list, long long start, struct fragment *fragment, 1947 unsigned short checksum, unsigned short fragment_checksum, 1948 int checksum_flag, int checksum_frag_flag) 1949 { 1950 struct file_info *dupl_ptr = malloc(sizeof(struct file_info)); 1951 1952 if(dupl_ptr == NULL) 1953 MEM_ERROR(); 1954 1955 dupl_ptr->file_size = file_size; 1956 dupl_ptr->bytes = bytes; 1957 dupl_ptr->block_list = block_list; 1958 dupl_ptr->start = start; 1959 dupl_ptr->fragment = fragment; 1960 dupl_ptr->checksum = checksum; 1961 dupl_ptr->fragment_checksum = fragment_checksum; 1962 dupl_ptr->have_frag_checksum = checksum_frag_flag; 1963 dupl_ptr->have_checksum = checksum_flag; 1964 1965 pthread_cleanup_push((void *) pthread_mutex_unlock, &dup_mutex); 1966 pthread_mutex_lock(&dup_mutex); 1967 dupl_ptr->next = dupl[DUP_HASH(file_size)]; 1968 dupl[DUP_HASH(file_size)] = dupl_ptr; 1969 dup_files ++; 1970 pthread_cleanup_pop(1); 1971 1972 return dupl_ptr; 1973 } 1974 1975 1976 struct fragment *frag_duplicate(struct file_buffer *file_buffer, char *dont_put) 1977 { 1978 struct file_info *dupl_ptr; 1979 struct file_buffer *buffer; 1980 struct file_info *dupl_start = file_buffer->dupl_start; 1981 long long file_size = file_buffer->file_size; 1982 unsigned short checksum = file_buffer->checksum; 1983 int res; 1984 1985 if(file_buffer->duplicate) { 1986 TRACE("Found duplicate file, fragment %d, size %d, offset %d, " 1987 "checksum 0x%x\n", dupl_start->fragment->index, 1988 file_size, dupl_start->fragment->offset, checksum); 1989 *dont_put = TRUE; 1990 return dupl_start->fragment; 1991 } else { 1992 *dont_put = FALSE; 1993 dupl_ptr = dupl[DUP_HASH(file_size)]; 1994 } 1995 1996 for(; dupl_ptr && dupl_ptr != dupl_start; dupl_ptr = dupl_ptr->next) { 1997 if(file_size == dupl_ptr->file_size && file_size == 1998 dupl_ptr->fragment->size) { 1999 if(get_fragment_checksum(dupl_ptr) == checksum) { 2000 buffer = get_fragment(dupl_ptr->fragment); 2001 res = memcmp(file_buffer->data, buffer->data + 2002 dupl_ptr->fragment->offset, file_size); 2003 cache_block_put(buffer); 2004 if(res == 0) 2005 break; 2006 } 2007 } 2008 } 2009 2010 if(!dupl_ptr || dupl_ptr == dupl_start) 2011 return NULL; 2012 2013 TRACE("Found duplicate file, fragment %d, size %d, offset %d, " 2014 "checksum 0x%x\n", dupl_ptr->fragment->index, file_size, 2015 dupl_ptr->fragment->offset, checksum); 2016 2017 return dupl_ptr->fragment; 2018 } 2019 2020 2021 struct file_info *duplicate(long long file_size, long long bytes, 2022 unsigned int **block_list, long long *start, struct fragment **fragment, 2023 struct file_buffer *file_buffer, int blocks, unsigned short checksum, 2024 int checksum_flag) 2025 { 2026 struct file_info *dupl_ptr = dupl[DUP_HASH(file_size)]; 2027 int frag_bytes = file_buffer ? file_buffer->size : 0; 2028 unsigned short fragment_checksum = file_buffer ? 2029 file_buffer->checksum : 0; 2030 2031 for(; dupl_ptr; dupl_ptr = dupl_ptr->next) 2032 if(file_size == dupl_ptr->file_size && bytes == dupl_ptr->bytes 2033 && frag_bytes == dupl_ptr->fragment->size) { 2034 long long target_start, dup_start = dupl_ptr->start; 2035 int block; 2036 2037 if(memcmp(*block_list, dupl_ptr->block_list, blocks * 2038 sizeof(unsigned int)) != 0) 2039 continue; 2040 2041 if(checksum_flag == FALSE) { 2042 checksum = get_checksum_disk(*start, bytes, 2043 *block_list); 2044 checksum_flag = TRUE; 2045 } 2046 2047 if(!dupl_ptr->have_checksum) { 2048 dupl_ptr->checksum = 2049 get_checksum_disk(dupl_ptr->start, 2050 dupl_ptr->bytes, dupl_ptr->block_list); 2051 dupl_ptr->have_checksum = TRUE; 2052 } 2053 2054 if(checksum != dupl_ptr->checksum || 2055 fragment_checksum != 2056 get_fragment_checksum(dupl_ptr)) 2057 continue; 2058 2059 target_start = *start; 2060 for(block = 0; block < blocks; block ++) { 2061 int size = SQUASHFS_COMPRESSED_SIZE_BLOCK 2062 ((*block_list)[block]); 2063 struct file_buffer *target_buffer = NULL; 2064 struct file_buffer *dup_buffer = NULL; 2065 char *target_data, *dup_data; 2066 int res; 2067 2068 if(size == 0) 2069 continue; 2070 target_buffer = cache_lookup(bwriter_buffer, 2071 target_start); 2072 if(target_buffer) 2073 target_data = target_buffer->data; 2074 else { 2075 target_data = 2076 read_from_disk(target_start, 2077 size); 2078 if(target_data == NULL) { 2079 ERROR("Failed to read data from" 2080 " output filesystem\n"); 2081 BAD_ERROR("Output filesystem" 2082 " corrupted?\n"); 2083 } 2084 } 2085 2086 dup_buffer = cache_lookup(bwriter_buffer, 2087 dup_start); 2088 if(dup_buffer) 2089 dup_data = dup_buffer->data; 2090 else { 2091 dup_data = read_from_disk2(dup_start, 2092 size); 2093 if(dup_data == NULL) { 2094 ERROR("Failed to read data from" 2095 " output filesystem\n"); 2096 BAD_ERROR("Output filesystem" 2097 " corrupted?\n"); 2098 } 2099 } 2100 2101 res = memcmp(target_data, dup_data, size); 2102 cache_block_put(target_buffer); 2103 cache_block_put(dup_buffer); 2104 if(res != 0) 2105 break; 2106 target_start += size; 2107 dup_start += size; 2108 } 2109 if(block == blocks) { 2110 struct file_buffer *frag_buffer = 2111 get_fragment(dupl_ptr->fragment); 2112 2113 if(frag_bytes == 0 || 2114 memcmp(file_buffer->data, 2115 frag_buffer->data + 2116 dupl_ptr->fragment->offset, 2117 frag_bytes) == 0) { 2118 TRACE("Found duplicate file, start " 2119 "0x%llx, size %lld, checksum " 2120 "0x%x, fragment %d, size %d, " 2121 "offset %d, checksum 0x%x\n", 2122 dupl_ptr->start, 2123 dupl_ptr->bytes, 2124 dupl_ptr->checksum, 2125 dupl_ptr->fragment->index, 2126 frag_bytes, 2127 dupl_ptr->fragment->offset, 2128 fragment_checksum); 2129 *block_list = dupl_ptr->block_list; 2130 *start = dupl_ptr->start; 2131 *fragment = dupl_ptr->fragment; 2132 cache_block_put(frag_buffer); 2133 return 0; 2134 } 2135 cache_block_put(frag_buffer); 2136 } 2137 } 2138 2139 2140 return add_non_dup(file_size, bytes, *block_list, *start, *fragment, 2141 checksum, fragment_checksum, checksum_flag, TRUE); 2142 } 2143 2144 2145 static inline int is_fragment(struct inode_info *inode) 2146 { 2147 off_t file_size = inode->buf.st_size; 2148 2149 /* 2150 * If this block is to be compressed differently to the 2151 * fragment compression then it cannot be a fragment 2152 */ 2153 if(inode->noF != noF) 2154 return FALSE; 2155 2156 return !inode->no_fragments && file_size && (file_size < block_size || 2157 (inode->always_use_fragments && file_size & (block_size - 1))); 2158 } 2159 2160 2161 void put_file_buffer(struct file_buffer *file_buffer) 2162 { 2163 /* 2164 * Decide where to send the file buffer: 2165 * - compressible non-fragment blocks go to the deflate threads, 2166 * - fragments go to the process fragment threads, 2167 * - all others go directly to the main thread 2168 */ 2169 if(file_buffer->error) { 2170 file_buffer->fragment = 0; 2171 seq_queue_put(to_main, file_buffer); 2172 } else if (file_buffer->file_size == 0) 2173 seq_queue_put(to_main, file_buffer); 2174 else if(file_buffer->fragment) 2175 queue_put(to_process_frag, file_buffer); 2176 else 2177 queue_put(to_deflate, file_buffer); 2178 } 2179 2180 2181 static int seq = 0; 2182 void reader_read_process(struct dir_ent *dir_ent) 2183 { 2184 long long bytes = 0; 2185 struct inode_info *inode = dir_ent->inode; 2186 struct file_buffer *prev_buffer = NULL, *file_buffer; 2187 int status, byte, res, child; 2188 int file = pseudo_exec_file(get_pseudo_file(inode->pseudo_id), &child); 2189 2190 if(!file) { 2191 file_buffer = cache_get_nohash(reader_buffer); 2192 file_buffer->sequence = seq ++; 2193 goto read_err; 2194 } 2195 2196 while(1) { 2197 file_buffer = cache_get_nohash(reader_buffer); 2198 file_buffer->sequence = seq ++; 2199 file_buffer->noD = inode->noD; 2200 2201 byte = read_bytes(file, file_buffer->data, block_size); 2202 if(byte == -1) 2203 goto read_err2; 2204 2205 file_buffer->size = byte; 2206 file_buffer->file_size = -1; 2207 file_buffer->error = FALSE; 2208 file_buffer->fragment = FALSE; 2209 bytes += byte; 2210 2211 if(byte == 0) 2212 break; 2213 2214 /* 2215 * Update progress bar size. This is done 2216 * on every block rather than waiting for all blocks to be 2217 * read incase write_file_process() is running in parallel 2218 * with this. Otherwise the current progress bar position 2219 * may get ahead of the progress bar size. 2220 */ 2221 progress_bar_size(1); 2222 2223 if(prev_buffer) 2224 put_file_buffer(prev_buffer); 2225 prev_buffer = file_buffer; 2226 } 2227 2228 /* 2229 * Update inode file size now that the size of the dynamic pseudo file 2230 * is known. This is needed for the -info option. 2231 */ 2232 inode->buf.st_size = bytes; 2233 2234 res = waitpid(child, &status, 0); 2235 close(file); 2236 2237 if(res == -1 || !WIFEXITED(status) || WEXITSTATUS(status) != 0) 2238 goto read_err; 2239 2240 if(prev_buffer == NULL) 2241 prev_buffer = file_buffer; 2242 else { 2243 cache_block_put(file_buffer); 2244 seq --; 2245 } 2246 prev_buffer->file_size = bytes; 2247 prev_buffer->fragment = is_fragment(inode); 2248 put_file_buffer(prev_buffer); 2249 2250 return; 2251 2252 read_err2: 2253 close(file); 2254 read_err: 2255 if(prev_buffer) { 2256 cache_block_put(file_buffer); 2257 seq --; 2258 file_buffer = prev_buffer; 2259 } 2260 file_buffer->error = TRUE; 2261 put_file_buffer(file_buffer); 2262 } 2263 2264 2265 void reader_read_file(struct dir_ent *dir_ent) 2266 { 2267 struct stat *buf = &dir_ent->inode->buf, buf2; 2268 struct file_buffer *file_buffer; 2269 int blocks, file, res; 2270 long long bytes, read_size; 2271 struct inode_info *inode = dir_ent->inode; 2272 2273 if(inode->read) 2274 return; 2275 2276 inode->read = TRUE; 2277 again: 2278 bytes = 0; 2279 read_size = buf->st_size; 2280 blocks = (read_size + block_size - 1) >> block_log; 2281 2282 file = open(pathname_reader(dir_ent), O_RDONLY); 2283 if(file == -1) { 2284 file_buffer = cache_get_nohash(reader_buffer); 2285 file_buffer->sequence = seq ++; 2286 goto read_err2; 2287 } 2288 2289 do { 2290 file_buffer = cache_get_nohash(reader_buffer); 2291 file_buffer->file_size = read_size; 2292 file_buffer->sequence = seq ++; 2293 file_buffer->noD = inode->noD; 2294 file_buffer->error = FALSE; 2295 2296 /* 2297 * Always try to read block_size bytes from the file rather 2298 * than expected bytes (which will be less than the block_size 2299 * at the file tail) to check that the file hasn't grown 2300 * since being stated. If it is longer (or shorter) than 2301 * expected, then restat, and try again. Note the special 2302 * case where the file is an exact multiple of the block_size 2303 * is dealt with later. 2304 */ 2305 file_buffer->size = read_bytes(file, file_buffer->data, 2306 block_size); 2307 if(file_buffer->size == -1) 2308 goto read_err; 2309 2310 bytes += file_buffer->size; 2311 2312 if(blocks > 1) { 2313 /* non-tail block should be exactly block_size */ 2314 if(file_buffer->size < block_size) 2315 goto restat; 2316 2317 file_buffer->fragment = FALSE; 2318 put_file_buffer(file_buffer); 2319 } 2320 } while(-- blocks > 0); 2321 2322 /* Overall size including tail should match */ 2323 if(read_size != bytes) 2324 goto restat; 2325 2326 if(read_size && read_size % block_size == 0) { 2327 /* 2328 * Special case where we've not tried to read past the end of 2329 * the file. We expect to get EOF, i.e. the file isn't larger 2330 * than we expect. 2331 */ 2332 char buffer; 2333 int res; 2334 2335 res = read_bytes(file, &buffer, 1); 2336 if(res == -1) 2337 goto read_err; 2338 2339 if(res != 0) 2340 goto restat; 2341 } 2342 2343 file_buffer->fragment = is_fragment(inode); 2344 put_file_buffer(file_buffer); 2345 2346 close(file); 2347 2348 return; 2349 2350 restat: 2351 res = fstat(file, &buf2); 2352 if(res == -1) { 2353 ERROR("Cannot stat dir/file %s because %s\n", 2354 pathname_reader(dir_ent), strerror(errno)); 2355 goto read_err; 2356 } 2357 2358 if(read_size != buf2.st_size) { 2359 close(file); 2360 memcpy(buf, &buf2, sizeof(struct stat)); 2361 file_buffer->error = 2; 2362 put_file_buffer(file_buffer); 2363 goto again; 2364 } 2365 read_err: 2366 close(file); 2367 read_err2: 2368 file_buffer->error = TRUE; 2369 put_file_buffer(file_buffer); 2370 } 2371 2372 2373 void reader_scan(struct dir_info *dir) { 2374 struct dir_ent *dir_ent = dir->list; 2375 2376 for(; dir_ent; dir_ent = dir_ent->next) { 2377 struct stat *buf = &dir_ent->inode->buf; 2378 if(dir_ent->inode->root_entry) 2379 continue; 2380 2381 if(IS_PSEUDO_PROCESS(dir_ent->inode)) { 2382 reader_read_process(dir_ent); 2383 continue; 2384 } 2385 2386 switch(buf->st_mode & S_IFMT) { 2387 case S_IFREG: 2388 reader_read_file(dir_ent); 2389 break; 2390 case S_IFDIR: 2391 reader_scan(dir_ent->dir); 2392 break; 2393 } 2394 } 2395 } 2396 2397 2398 void *reader(void *arg) 2399 { 2400 if(!sorted) 2401 reader_scan(queue_get(to_reader)); 2402 else { 2403 int i; 2404 struct priority_entry *entry; 2405 2406 queue_get(to_reader); 2407 for(i = 65535; i >= 0; i--) 2408 for(entry = priority_list[i]; entry; 2409 entry = entry->next) 2410 reader_read_file(entry->dir); 2411 } 2412 2413 pthread_exit(NULL); 2414 } 2415 2416 2417 void *writer(void *arg) 2418 { 2419 while(1) { 2420 struct file_buffer *file_buffer = queue_get(to_writer); 2421 off_t off; 2422 2423 if(file_buffer == NULL) { 2424 queue_put(from_writer, NULL); 2425 continue; 2426 } 2427 2428 off = file_buffer->block; 2429 2430 pthread_cleanup_push((void *) pthread_mutex_unlock, &pos_mutex); 2431 pthread_mutex_lock(&pos_mutex); 2432 2433 if(lseek(fd, off, SEEK_SET) == -1) { 2434 ERROR("writer: Lseek on destination failed because " 2435 "%s, offset=0x%llx\n", strerror(errno), off); 2436 BAD_ERROR("Probably out of space on output " 2437 "%s\n", block_device ? "block device" : 2438 "filesystem"); 2439 } 2440 2441 if(write_bytes(fd, file_buffer->data, 2442 file_buffer->size) == -1) 2443 BAD_ERROR("Failed to write to output %s\n", 2444 block_device ? "block device" : "filesystem"); 2445 2446 pthread_cleanup_pop(1); 2447 2448 cache_block_put(file_buffer); 2449 } 2450 } 2451 2452 2453 int all_zero(struct file_buffer *file_buffer) 2454 { 2455 int i; 2456 long entries = file_buffer->size / sizeof(long); 2457 long *p = (long *) file_buffer->data; 2458 2459 for(i = 0; i < entries && p[i] == 0; i++); 2460 2461 if(i == entries) { 2462 for(i = file_buffer->size & ~(sizeof(long) - 1); 2463 i < file_buffer->size && file_buffer->data[i] == 0; 2464 i++); 2465 2466 return i == file_buffer->size; 2467 } 2468 2469 return 0; 2470 } 2471 2472 2473 void *deflator(void *arg) 2474 { 2475 struct file_buffer *write_buffer = cache_get_nohash(bwriter_buffer); 2476 void *stream = NULL; 2477 int res; 2478 2479 res = compressor_init(comp, &stream, block_size, 1); 2480 if(res) 2481 BAD_ERROR("deflator:: compressor_init failed\n"); 2482 2483 while(1) { 2484 struct file_buffer *file_buffer = queue_get(to_deflate); 2485 2486 if(sparse_files && all_zero(file_buffer)) { 2487 file_buffer->c_byte = 0; 2488 seq_queue_put(to_main, file_buffer); 2489 } else { 2490 write_buffer->c_byte = mangle2(stream, 2491 write_buffer->data, file_buffer->data, 2492 file_buffer->size, block_size, 2493 file_buffer->noD, 1); 2494 write_buffer->sequence = file_buffer->sequence; 2495 write_buffer->file_size = file_buffer->file_size; 2496 write_buffer->block = file_buffer->block; 2497 write_buffer->size = SQUASHFS_COMPRESSED_SIZE_BLOCK 2498 (write_buffer->c_byte); 2499 write_buffer->fragment = FALSE; 2500 write_buffer->error = FALSE; 2501 cache_block_put(file_buffer); 2502 seq_queue_put(to_main, write_buffer); 2503 write_buffer = cache_get_nohash(bwriter_buffer); 2504 } 2505 } 2506 } 2507 2508 2509 void *frag_deflator(void *arg) 2510 { 2511 void *stream = NULL; 2512 int res; 2513 2514 res = compressor_init(comp, &stream, block_size, 1); 2515 if(res) 2516 BAD_ERROR("frag_deflator:: compressor_init failed\n"); 2517 2518 pthread_cleanup_push((void *) pthread_mutex_unlock, &fragment_mutex); 2519 2520 while(1) { 2521 int c_byte, compressed_size; 2522 struct file_buffer *file_buffer = queue_get(to_frag); 2523 struct file_buffer *write_buffer = 2524 cache_get(fwriter_buffer, file_buffer->block); 2525 2526 c_byte = mangle2(stream, write_buffer->data, file_buffer->data, 2527 file_buffer->size, block_size, noF, 1); 2528 compressed_size = SQUASHFS_COMPRESSED_SIZE_BLOCK(c_byte); 2529 write_buffer->size = compressed_size; 2530 pthread_mutex_lock(&fragment_mutex); 2531 if(fragments_locked == FALSE) { 2532 fragment_table[file_buffer->block].size = c_byte; 2533 fragment_table[file_buffer->block].start_block = bytes; 2534 write_buffer->block = bytes; 2535 bytes += compressed_size; 2536 fragments_outstanding --; 2537 queue_put(to_writer, write_buffer); 2538 pthread_mutex_unlock(&fragment_mutex); 2539 TRACE("Writing fragment %lld, uncompressed size %d, " 2540 "compressed size %d\n", file_buffer->block, 2541 file_buffer->size, compressed_size); 2542 } else { 2543 add_pending_fragment(write_buffer, c_byte, 2544 file_buffer->block); 2545 pthread_mutex_unlock(&fragment_mutex); 2546 } 2547 cache_block_put(file_buffer); 2548 } 2549 2550 pthread_cleanup_pop(0); 2551 } 2552 2553 2554 struct file_buffer *get_file_buffer() 2555 { 2556 struct file_buffer *file_buffer = seq_queue_get(to_main); 2557 2558 return file_buffer; 2559 } 2560 2561 2562 void write_file_empty(squashfs_inode *inode, struct dir_ent *dir_ent, 2563 struct file_buffer *file_buffer, int *duplicate_file) 2564 { 2565 file_count ++; 2566 *duplicate_file = FALSE; 2567 cache_block_put(file_buffer); 2568 create_inode(inode, NULL, dir_ent, SQUASHFS_FILE_TYPE, 0, 0, 0, 2569 NULL, &empty_fragment, NULL, 0); 2570 } 2571 2572 2573 void write_file_frag(squashfs_inode *inode, struct dir_ent *dir_ent, 2574 struct file_buffer *file_buffer, int *duplicate_file) 2575 { 2576 int size = file_buffer->file_size; 2577 struct fragment *fragment; 2578 unsigned short checksum = file_buffer->checksum; 2579 char dont_put; 2580 2581 fragment = frag_duplicate(file_buffer, &dont_put); 2582 *duplicate_file = !fragment; 2583 if(!fragment) { 2584 fragment = get_and_fill_fragment(file_buffer, dir_ent); 2585 if(duplicate_checking) 2586 add_non_dup(size, 0, NULL, 0, fragment, 0, checksum, 2587 TRUE, TRUE); 2588 } 2589 2590 if(dont_put) 2591 free(file_buffer); 2592 else 2593 cache_block_put(file_buffer); 2594 2595 total_bytes += size; 2596 file_count ++; 2597 2598 inc_progress_bar(); 2599 2600 create_inode(inode, NULL, dir_ent, SQUASHFS_FILE_TYPE, size, 0, 2601 0, NULL, fragment, NULL, 0); 2602 2603 if(!duplicate_checking) 2604 free_fragment(fragment); 2605 } 2606 2607 2608 int write_file_process(squashfs_inode *inode, struct dir_ent *dir_ent, 2609 struct file_buffer *read_buffer, int *duplicate_file) 2610 { 2611 long long read_size, file_bytes, start; 2612 struct fragment *fragment; 2613 unsigned int *block_list = NULL; 2614 int block = 0, status; 2615 long long sparse = 0; 2616 struct file_buffer *fragment_buffer = NULL; 2617 2618 *duplicate_file = FALSE; 2619 2620 lock_fragments(); 2621 2622 file_bytes = 0; 2623 start = bytes; 2624 while (1) { 2625 read_size = read_buffer->file_size; 2626 if(read_buffer->fragment) 2627 fragment_buffer = read_buffer; 2628 else { 2629 block_list = realloc(block_list, (block + 1) * 2630 sizeof(unsigned int)); 2631 if(block_list == NULL) 2632 MEM_ERROR(); 2633 block_list[block ++] = read_buffer->c_byte; 2634 if(read_buffer->c_byte) { 2635 read_buffer->block = bytes; 2636 bytes += read_buffer->size; 2637 cache_hash(read_buffer, read_buffer->block); 2638 file_bytes += read_buffer->size; 2639 queue_put(to_writer, read_buffer); 2640 } else { 2641 sparse += read_buffer->size; 2642 cache_block_put(read_buffer); 2643 } 2644 } 2645 inc_progress_bar(); 2646 2647 if(read_size != -1) 2648 break; 2649 2650 read_buffer = get_file_buffer(); 2651 if(read_buffer->error) 2652 goto read_err; 2653 } 2654 2655 unlock_fragments(); 2656 fragment = get_and_fill_fragment(fragment_buffer, dir_ent); 2657 2658 if(duplicate_checking) 2659 add_non_dup(read_size, file_bytes, block_list, start, fragment, 2660 0, fragment_buffer ? fragment_buffer->checksum : 0, 2661 FALSE, TRUE); 2662 cache_block_put(fragment_buffer); 2663 file_count ++; 2664 total_bytes += read_size; 2665 2666 create_inode(inode, NULL, dir_ent, SQUASHFS_FILE_TYPE, read_size, start, 2667 block, block_list, fragment, NULL, sparse); 2668 2669 if(duplicate_checking == FALSE) { 2670 free(block_list); 2671 free_fragment(fragment); 2672 } 2673 2674 return 0; 2675 2676 read_err: 2677 dec_progress_bar(block); 2678 status = read_buffer->error; 2679 bytes = start; 2680 if(!block_device) { 2681 int res; 2682 2683 queue_put(to_writer, NULL); 2684 if(queue_get(from_writer) != 0) 2685 EXIT_MKSQUASHFS(); 2686 res = ftruncate(fd, bytes); 2687 if(res != 0) 2688 BAD_ERROR("Failed to truncate dest file because %s\n", 2689 strerror(errno)); 2690 } 2691 unlock_fragments(); 2692 free(block_list); 2693 cache_block_put(read_buffer); 2694 return status; 2695 } 2696 2697 2698 int write_file_blocks_dup(squashfs_inode *inode, struct dir_ent *dir_ent, 2699 struct file_buffer *read_buffer, int *duplicate_file) 2700 { 2701 int block, thresh; 2702 long long read_size = read_buffer->file_size; 2703 long long file_bytes, dup_start, start; 2704 struct fragment *fragment; 2705 struct file_info *dupl_ptr; 2706 int blocks = (read_size + block_size - 1) >> block_log; 2707 unsigned int *block_list, *block_listp; 2708 struct file_buffer **buffer_list; 2709 int status; 2710 long long sparse = 0; 2711 struct file_buffer *fragment_buffer = NULL; 2712 2713 block_list = malloc(blocks * sizeof(unsigned int)); 2714 if(block_list == NULL) 2715 MEM_ERROR(); 2716 block_listp = block_list; 2717 2718 buffer_list = malloc(blocks * sizeof(struct file_buffer *)); 2719 if(buffer_list == NULL) 2720 MEM_ERROR(); 2721 2722 lock_fragments(); 2723 2724 file_bytes = 0; 2725 start = dup_start = bytes; 2726 thresh = blocks > bwriter_size ? blocks - bwriter_size : 0; 2727 2728 for(block = 0; block < blocks;) { 2729 if(read_buffer->fragment) { 2730 block_list[block] = 0; 2731 buffer_list[block] = NULL; 2732 fragment_buffer = read_buffer; 2733 blocks = read_size >> block_log; 2734 } else { 2735 block_list[block] = read_buffer->c_byte; 2736 2737 if(read_buffer->c_byte) { 2738 read_buffer->block = bytes; 2739 bytes += read_buffer->size; 2740 file_bytes += read_buffer->size; 2741 cache_hash(read_buffer, read_buffer->block); 2742 if(block < thresh) { 2743 buffer_list[block] = NULL; 2744 queue_put(to_writer, read_buffer); 2745 } else 2746 buffer_list[block] = read_buffer; 2747 } else { 2748 buffer_list[block] = NULL; 2749 sparse += read_buffer->size; 2750 cache_block_put(read_buffer); 2751 } 2752 } 2753 inc_progress_bar(); 2754 2755 if(++block < blocks) { 2756 read_buffer = get_file_buffer(); 2757 if(read_buffer->error) 2758 goto read_err; 2759 } 2760 } 2761 2762 dupl_ptr = duplicate(read_size, file_bytes, &block_listp, &dup_start, 2763 &fragment, fragment_buffer, blocks, 0, FALSE); 2764 2765 if(dupl_ptr) { 2766 *duplicate_file = FALSE; 2767 for(block = thresh; block < blocks; block ++) 2768 if(buffer_list[block]) 2769 queue_put(to_writer, buffer_list[block]); 2770 fragment = get_and_fill_fragment(fragment_buffer, dir_ent); 2771 dupl_ptr->fragment = fragment; 2772 } else { 2773 *duplicate_file = TRUE; 2774 for(block = thresh; block < blocks; block ++) 2775 cache_block_put(buffer_list[block]); 2776 bytes = start; 2777 if(thresh && !block_device) { 2778 int res; 2779 2780 queue_put(to_writer, NULL); 2781 if(queue_get(from_writer) != 0) 2782 EXIT_MKSQUASHFS(); 2783 res = ftruncate(fd, bytes); 2784 if(res != 0) 2785 BAD_ERROR("Failed to truncate dest file because" 2786 " %s\n", strerror(errno)); 2787 } 2788 } 2789 2790 unlock_fragments(); 2791 cache_block_put(fragment_buffer); 2792 free(buffer_list); 2793 file_count ++; 2794 total_bytes += read_size; 2795 2796 /* 2797 * sparse count is needed to ensure squashfs correctly reports a 2798 * a smaller block count on stat calls to sparse files. This is 2799 * to ensure intelligent applications like cp correctly handle the 2800 * file as a sparse file. If the file in the original filesystem isn't 2801 * stored as a sparse file then still store it sparsely in squashfs, but 2802 * report it as non-sparse on stat calls to preserve semantics 2803 */ 2804 if(sparse && (dir_ent->inode->buf.st_blocks << 9) >= read_size) 2805 sparse = 0; 2806 2807 create_inode(inode, NULL, dir_ent, SQUASHFS_FILE_TYPE, read_size, 2808 dup_start, blocks, block_listp, fragment, NULL, sparse); 2809 2810 if(*duplicate_file == TRUE) 2811 free(block_list); 2812 2813 return 0; 2814 2815 read_err: 2816 dec_progress_bar(block); 2817 status = read_buffer->error; 2818 bytes = start; 2819 if(thresh && !block_device) { 2820 int res; 2821 2822 queue_put(to_writer, NULL); 2823 if(queue_get(from_writer) != 0) 2824 EXIT_MKSQUASHFS(); 2825 res = ftruncate(fd, bytes); 2826 if(res != 0) 2827 BAD_ERROR("Failed to truncate dest file because %s\n", 2828 strerror(errno)); 2829 } 2830 unlock_fragments(); 2831 for(blocks = thresh; blocks < block; blocks ++) 2832 cache_block_put(buffer_list[blocks]); 2833 free(buffer_list); 2834 free(block_list); 2835 cache_block_put(read_buffer); 2836 return status; 2837 } 2838 2839 2840 int write_file_blocks(squashfs_inode *inode, struct dir_ent *dir_ent, 2841 struct file_buffer *read_buffer, int *dup) 2842 { 2843 long long read_size = read_buffer->file_size; 2844 long long file_bytes, start; 2845 struct fragment *fragment; 2846 unsigned int *block_list; 2847 int block, status; 2848 int blocks = (read_size + block_size - 1) >> block_log; 2849 long long sparse = 0; 2850 struct file_buffer *fragment_buffer = NULL; 2851 2852 if(pre_duplicate(read_size)) 2853 return write_file_blocks_dup(inode, dir_ent, read_buffer, dup); 2854 2855 *dup = FALSE; 2856 2857 block_list = malloc(blocks * sizeof(unsigned int)); 2858 if(block_list == NULL) 2859 MEM_ERROR(); 2860 2861 lock_fragments(); 2862 2863 file_bytes = 0; 2864 /* ANDROID CHANGES START*/ 2865 #ifdef ANDROID 2866 if (align_4k_blocks && bytes % 4096) { 2867 bytes += 4096 - (bytes % 4096); 2868 } 2869 #endif 2870 /* ANDROID CHANGES END */ 2871 start = bytes; 2872 for(block = 0; block < blocks;) { 2873 if(read_buffer->fragment) { 2874 block_list[block] = 0; 2875 fragment_buffer = read_buffer; 2876 blocks = read_size >> block_log; 2877 } else { 2878 block_list[block] = read_buffer->c_byte; 2879 if(read_buffer->c_byte) { 2880 read_buffer->block = bytes; 2881 bytes += read_buffer->size; 2882 cache_hash(read_buffer, read_buffer->block); 2883 file_bytes += read_buffer->size; 2884 queue_put(to_writer, read_buffer); 2885 } else { 2886 sparse += read_buffer->size; 2887 cache_block_put(read_buffer); 2888 } 2889 } 2890 inc_progress_bar(); 2891 2892 if(++block < blocks) { 2893 read_buffer = get_file_buffer(); 2894 if(read_buffer->error) 2895 goto read_err; 2896 } 2897 } 2898 2899 unlock_fragments(); 2900 fragment = get_and_fill_fragment(fragment_buffer, dir_ent); 2901 2902 if(duplicate_checking) 2903 add_non_dup(read_size, file_bytes, block_list, start, fragment, 2904 0, fragment_buffer ? fragment_buffer->checksum : 0, 2905 FALSE, TRUE); 2906 cache_block_put(fragment_buffer); 2907 file_count ++; 2908 total_bytes += read_size; 2909 2910 /* 2911 * sparse count is needed to ensure squashfs correctly reports a 2912 * a smaller block count on stat calls to sparse files. This is 2913 * to ensure intelligent applications like cp correctly handle the 2914 * file as a sparse file. If the file in the original filesystem isn't 2915 * stored as a sparse file then still store it sparsely in squashfs, but 2916 * report it as non-sparse on stat calls to preserve semantics 2917 */ 2918 if(sparse && (dir_ent->inode->buf.st_blocks << 9) >= read_size) 2919 sparse = 0; 2920 2921 create_inode(inode, NULL, dir_ent, SQUASHFS_FILE_TYPE, read_size, start, 2922 blocks, block_list, fragment, NULL, sparse); 2923 2924 if(duplicate_checking == FALSE) { 2925 free(block_list); 2926 free_fragment(fragment); 2927 } 2928 2929 return 0; 2930 2931 read_err: 2932 dec_progress_bar(block); 2933 status = read_buffer->error; 2934 bytes = start; 2935 if(!block_device) { 2936 int res; 2937 2938 queue_put(to_writer, NULL); 2939 if(queue_get(from_writer) != 0) 2940 EXIT_MKSQUASHFS(); 2941 res = ftruncate(fd, bytes); 2942 if(res != 0) 2943 BAD_ERROR("Failed to truncate dest file because %s\n", 2944 strerror(errno)); 2945 } 2946 unlock_fragments(); 2947 free(block_list); 2948 cache_block_put(read_buffer); 2949 return status; 2950 } 2951 2952 2953 void write_file(squashfs_inode *inode, struct dir_ent *dir, int *dup) 2954 { 2955 int status; 2956 struct file_buffer *read_buffer; 2957 2958 again: 2959 read_buffer = get_file_buffer(); 2960 status = read_buffer->error; 2961 2962 if(status) 2963 cache_block_put(read_buffer); 2964 else if(read_buffer->file_size == -1) 2965 status = write_file_process(inode, dir, read_buffer, dup); 2966 else if(read_buffer->file_size == 0) 2967 write_file_empty(inode, dir, read_buffer, dup); 2968 else if(read_buffer->fragment && read_buffer->c_byte) 2969 write_file_frag(inode, dir, read_buffer, dup); 2970 else 2971 status = write_file_blocks(inode, dir, read_buffer, dup); 2972 2973 if(status == 2) { 2974 ERROR("File %s changed size while reading filesystem, " 2975 "attempting to re-read\n", pathname(dir)); 2976 goto again; 2977 } else if(status == 1) { 2978 ERROR_START("Failed to read file %s", pathname(dir)); 2979 ERROR_EXIT(", creating empty file\n"); 2980 write_file_empty(inode, dir, NULL, dup); 2981 } 2982 } 2983 2984 2985 #define BUFF_SIZE 512 2986 char *name; 2987 char *basename_r(); 2988 2989 char *getbase(char *pathname) 2990 { 2991 static char *b_buffer = NULL; 2992 static int b_size = BUFF_SIZE; 2993 char *result; 2994 2995 if(b_buffer == NULL) { 2996 b_buffer = malloc(b_size); 2997 if(b_buffer == NULL) 2998 MEM_ERROR(); 2999 } 3000 3001 while(1) { 3002 if(*pathname != '/') { 3003 result = getcwd(b_buffer, b_size); 3004 if(result == NULL && errno != ERANGE) 3005 BAD_ERROR("Getcwd failed in getbase\n"); 3006 3007 /* enough room for pathname + "/" + '\0' terminator? */ 3008 if(result && strlen(pathname) + 2 <= 3009 b_size - strlen(b_buffer)) { 3010 strcat(strcat(b_buffer, "/"), pathname); 3011 break; 3012 } 3013 } else if(strlen(pathname) < b_size) { 3014 strcpy(b_buffer, pathname); 3015 break; 3016 } 3017 3018 /* Buffer not large enough, realloc and try again */ 3019 b_buffer = realloc(b_buffer, b_size += BUFF_SIZE); 3020 if(b_buffer == NULL) 3021 MEM_ERROR(); 3022 } 3023 3024 name = b_buffer; 3025 if(((result = basename_r()) == NULL) || (strcmp(result, "..") == 0)) 3026 return NULL; 3027 else 3028 return result; 3029 } 3030 3031 3032 char *basename_r() 3033 { 3034 char *s; 3035 char *p; 3036 int n = 1; 3037 3038 for(;;) { 3039 s = name; 3040 if(*name == '\0') 3041 return NULL; 3042 if(*name != '/') { 3043 while(*name != '\0' && *name != '/') name++; 3044 n = name - s; 3045 } 3046 while(*name == '/') name++; 3047 if(strncmp(s, ".", n) == 0) 3048 continue; 3049 if((*name == '\0') || (strncmp(s, "..", n) == 0) || 3050 ((p = basename_r()) == NULL)) { 3051 s[n] = '\0'; 3052 return s; 3053 } 3054 if(strcmp(p, "..") == 0) 3055 continue; 3056 return p; 3057 } 3058 } 3059 3060 3061 struct inode_info *lookup_inode3(struct stat *buf, int pseudo, int id, 3062 char *symlink, int bytes) 3063 { 3064 int ino_hash = INODE_HASH(buf->st_dev, buf->st_ino); 3065 struct inode_info *inode; 3066 3067 /* 3068 * Look-up inode in hash table, if it already exists we have a 3069 * hard-link, so increment the nlink count and return it. 3070 * Don't do the look-up for directories because we don't hard-link 3071 * directories. 3072 */ 3073 if ((buf->st_mode & S_IFMT) != S_IFDIR) { 3074 for(inode = inode_info[ino_hash]; inode; inode = inode->next) { 3075 if(memcmp(buf, &inode->buf, sizeof(struct stat)) == 0) { 3076 inode->nlink ++; 3077 return inode; 3078 } 3079 } 3080 } 3081 3082 inode = malloc(sizeof(struct inode_info) + bytes); 3083 if(inode == NULL) 3084 MEM_ERROR(); 3085 3086 if(bytes) 3087 memcpy(&inode->symlink, symlink, bytes); 3088 memcpy(&inode->buf, buf, sizeof(struct stat)); 3089 inode->read = FALSE; 3090 inode->root_entry = FALSE; 3091 inode->pseudo_file = pseudo; 3092 inode->pseudo_id = id; 3093 inode->inode = SQUASHFS_INVALID_BLK; 3094 inode->nlink = 1; 3095 inode->inode_number = 0; 3096 3097 /* 3098 * Copy filesystem wide defaults into inode, these filesystem 3099 * wide defaults may be altered on an individual inode basis by 3100 * user specified actions 3101 * 3102 */ 3103 inode->no_fragments = no_fragments; 3104 inode->always_use_fragments = always_use_fragments; 3105 3106 /* ANDROID CHANGES START*/ 3107 #ifdef ANDROID 3108 /* Check the whitelist */ 3109 inode->noD = whitelisted(buf); 3110 #else 3111 inode->noD = noD; 3112 #endif 3113 /* ANDROID CHANGES END */ 3114 3115 inode->noF = noF; 3116 3117 inode->next = inode_info[ino_hash]; 3118 inode_info[ino_hash] = inode; 3119 3120 return inode; 3121 } 3122 3123 3124 static inline struct inode_info *lookup_inode2(struct stat *buf, int pseudo, int id) 3125 { 3126 return lookup_inode3(buf, pseudo, id, NULL, 0); 3127 } 3128 3129 3130 static inline struct inode_info *lookup_inode(struct stat *buf) 3131 { 3132 return lookup_inode2(buf, 0, 0); 3133 } 3134 3135 3136 static inline void alloc_inode_no(struct inode_info *inode, unsigned int use_this) 3137 { 3138 if (inode->inode_number == 0) { 3139 inode->inode_number = use_this ? : inode_no ++; 3140 if((inode->buf.st_mode & S_IFMT) == S_IFREG) 3141 progress_bar_size((inode->buf.st_size + block_size - 1) 3142 >> block_log); 3143 } 3144 } 3145 3146 3147 static inline struct dir_ent *create_dir_entry(char *name, char *source_name, 3148 char *nonstandard_pathname, struct dir_info *dir) 3149 { 3150 struct dir_ent *dir_ent = malloc(sizeof(struct dir_ent)); 3151 if(dir_ent == NULL) 3152 MEM_ERROR(); 3153 3154 dir_ent->name = name; 3155 dir_ent->source_name = source_name; 3156 dir_ent->nonstandard_pathname = nonstandard_pathname; 3157 dir_ent->our_dir = dir; 3158 dir_ent->inode = NULL; 3159 dir_ent->next = NULL; 3160 /* ANDROID CHANGES START*/ 3161 #ifdef ANDROID 3162 dir_ent->capabilities = 0; 3163 #endif 3164 /* ANDROID CHANGES END */ 3165 3166 return dir_ent; 3167 } 3168 3169 3170 static inline void add_dir_entry(struct dir_ent *dir_ent, struct dir_info *sub_dir, 3171 struct inode_info *inode_info) 3172 { 3173 struct dir_info *dir = dir_ent->our_dir; 3174 3175 if(sub_dir) 3176 sub_dir->dir_ent = dir_ent; 3177 3178 /* ANDROID CHANGES START*/ 3179 #ifdef ANDROID 3180 if (android_config) { 3181 if (mount_point) { 3182 char *mounted_path; 3183 char *rel_path; 3184 3185 alloc_mounted_path(mount_point, subpathname(dir_ent), &mounted_path); 3186 rel_path = mounted_path; 3187 while (rel_path && *rel_path == '/') 3188 rel_path++; 3189 android_fs_config(fs_config_func, rel_path, &inode_info->buf, target_out_path, &dir_ent->capabilities); 3190 free(mounted_path); 3191 } else { 3192 android_fs_config(fs_config_func, pathname(dir_ent), &inode_info->buf, target_out_path, &dir_ent->capabilities); 3193 } 3194 } 3195 #endif 3196 /* ANDROID CHANGES END */ 3197 3198 dir_ent->inode = inode_info; 3199 dir_ent->dir = sub_dir; 3200 3201 dir_ent->next = dir->list; 3202 dir->list = dir_ent; 3203 dir->count++; 3204 } 3205 3206 static inline void add_dir_entry2(char *name, char *source_name, 3207 char *nonstandard_pathname, struct dir_info *sub_dir, 3208 struct inode_info *inode_info, struct dir_info *dir) 3209 { 3210 struct dir_ent *dir_ent = create_dir_entry(name, source_name, 3211 nonstandard_pathname, dir); 3212 3213 3214 add_dir_entry(dir_ent, sub_dir, inode_info); 3215 } 3216 3217 3218 static inline void free_dir_entry(struct dir_ent *dir_ent) 3219 { 3220 if(dir_ent->name) 3221 free(dir_ent->name); 3222 3223 if(dir_ent->source_name) 3224 free(dir_ent->source_name); 3225 3226 if(dir_ent->nonstandard_pathname) 3227 free(dir_ent->nonstandard_pathname); 3228 3229 /* if this entry has been associated with an inode, then we need 3230 * to update the inode nlink count. Orphaned inodes are harmless, and 3231 * is easier to leave them than go to the bother of deleting them */ 3232 if(dir_ent->inode && !dir_ent->inode->root_entry) 3233 dir_ent->inode->nlink --; 3234 3235 free(dir_ent); 3236 } 3237 3238 3239 static inline void add_excluded(struct dir_info *dir) 3240 { 3241 dir->excluded ++; 3242 } 3243 3244 3245 void dir_scan(squashfs_inode *inode, char *pathname, 3246 struct dir_ent *(_readdir)(struct dir_info *), int progress) 3247 { 3248 struct stat buf; 3249 struct dir_ent *dir_ent; 3250 /* ANDROID CHANGES START*/ 3251 #ifdef ANDROID 3252 uint64_t caps = 0; 3253 #endif 3254 /* ANDROID CHANGES END */ 3255 3256 root_dir = dir_scan1(pathname, "", paths, _readdir, 1); 3257 if(root_dir == NULL) 3258 return; 3259 3260 /* Create root directory dir_ent and associated inode, and connect 3261 * it to the root directory dir_info structure */ 3262 dir_ent = create_dir_entry("", NULL, pathname, 3263 scan1_opendir("", "", 0)); 3264 3265 if(pathname[0] == '\0') { 3266 /* 3267 * dummy top level directory, if multiple sources specified on 3268 * command line 3269 */ 3270 memset(&buf, 0, sizeof(buf)); 3271 buf.st_mode = S_IRWXU | S_IRWXG | S_IRWXO | S_IFDIR; 3272 buf.st_uid = getuid(); 3273 buf.st_gid = getgid(); 3274 buf.st_mtime = time(NULL); 3275 buf.st_dev = 0; 3276 buf.st_ino = 0; 3277 dir_ent->inode = lookup_inode2(&buf, PSEUDO_FILE_OTHER, 0); 3278 } else { 3279 if(lstat(pathname, &buf) == -1) 3280 /* source directory has disappeared? */ 3281 BAD_ERROR("Cannot stat source directory %s because %s\n", 3282 pathname, strerror(errno)); 3283 /* ANDROID CHANGES START*/ 3284 #ifdef ANDROID 3285 buf.st_mode = S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH | S_IFDIR; // root mode 3286 buf.st_uid = 0; 3287 buf.st_gid = 0; 3288 buf.st_mtime = time(NULL); 3289 buf.st_dev = 0; 3290 buf.st_ino = 0; 3291 #endif 3292 /* ANDROID CHANGES END */ 3293 dir_ent->inode = lookup_inode(&buf); 3294 } 3295 3296 /* ANDROID CHANGES START*/ 3297 #ifdef ANDROID 3298 dir_ent->capabilities = caps; 3299 if (android_config) { 3300 android_fs_config(fs_config_func, "", &dir_ent->inode->buf, target_out_path, &dir_ent->capabilities); 3301 } 3302 #endif 3303 /* ANDROID CHANGES END */ 3304 3305 dir_ent->dir = root_dir; 3306 root_dir->dir_ent = dir_ent; 3307 3308 /* 3309 * Process most actions and any pseudo files 3310 */ 3311 if(actions() || get_pseudo()) 3312 dir_scan2(root_dir, get_pseudo()); 3313 3314 /* 3315 * Process move actions 3316 */ 3317 if(move_actions()) { 3318 dir_scan3(root_dir); 3319 do_move_actions(); 3320 } 3321 3322 /* 3323 * Process prune actions 3324 */ 3325 if(prune_actions()) 3326 dir_scan4(root_dir); 3327 3328 /* 3329 * Process empty actions 3330 */ 3331 if(empty_actions()) 3332 dir_scan5(root_dir); 3333 3334 /* 3335 * Sort directories and compute the inode numbers 3336 */ 3337 dir_scan6(root_dir); 3338 3339 alloc_inode_no(dir_ent->inode, root_inode_number); 3340 3341 eval_actions(root_dir, dir_ent); 3342 3343 if(sorted) 3344 generate_file_priorities(root_dir, 0, 3345 &root_dir->dir_ent->inode->buf); 3346 3347 if(appending) { 3348 sigset_t sigmask; 3349 3350 restore_thread = init_restore_thread(); 3351 sigemptyset(&sigmask); 3352 sigaddset(&sigmask, SIGINT); 3353 sigaddset(&sigmask, SIGTERM); 3354 sigaddset(&sigmask, SIGUSR1); 3355 if(pthread_sigmask(SIG_BLOCK, &sigmask, NULL) == -1) 3356 BAD_ERROR("Failed to set signal mask\n"); 3357 write_destination(fd, SQUASHFS_START, 4, "\0\0\0\0"); 3358 } 3359 3360 queue_put(to_reader, root_dir); 3361 3362 set_progressbar_state(progress); 3363 3364 if(sorted) 3365 sort_files_and_write(root_dir); 3366 3367 dir_scan7(inode, root_dir); 3368 dir_ent->inode->inode = *inode; 3369 dir_ent->inode->type = SQUASHFS_DIR_TYPE; 3370 } 3371 3372 3373 /* 3374 * dir_scan1 routines... 3375 * These scan the source directories into memory for processing. 3376 * Exclude actions are processed here (in contrast to the other actions) 3377 * because they affect what is scanned. 3378 */ 3379 struct dir_info *scan1_opendir(char *pathname, char *subpath, int depth) 3380 { 3381 struct dir_info *dir; 3382 3383 dir = malloc(sizeof(struct dir_info)); 3384 if(dir == NULL) 3385 MEM_ERROR(); 3386 3387 if(pathname[0] != '\0') { 3388 dir->linuxdir = opendir(pathname); 3389 if(dir->linuxdir == NULL) { 3390 free(dir); 3391 return NULL; 3392 } 3393 } 3394 3395 dir->pathname = strdup(pathname); 3396 dir->subpath = strdup(subpath); 3397 dir->count = 0; 3398 dir->directory_count = 0; 3399 dir->dir_is_ldir = TRUE; 3400 dir->list = NULL; 3401 dir->depth = depth; 3402 dir->excluded = 0; 3403 3404 return dir; 3405 } 3406 3407 3408 struct dir_ent *scan1_encomp_readdir(struct dir_info *dir) 3409 { 3410 static int index = 0; 3411 3412 if(dir->count < old_root_entries) { 3413 int i; 3414 3415 for(i = 0; i < old_root_entries; i++) { 3416 if(old_root_entry[i].inode.type == SQUASHFS_DIR_TYPE) 3417 dir->directory_count ++; 3418 add_dir_entry2(old_root_entry[i].name, NULL, NULL, NULL, 3419 &old_root_entry[i].inode, dir); 3420 } 3421 } 3422 3423 while(index < source) { 3424 char *basename = NULL; 3425 char *dir_name = getbase(source_path[index]); 3426 int pass = 1, res; 3427 3428 if(dir_name == NULL) { 3429 ERROR_START("Bad source directory %s", 3430 source_path[index]); 3431 ERROR_EXIT(" - skipping ...\n"); 3432 index ++; 3433 continue; 3434 } 3435 dir_name = strdup(dir_name); 3436 for(;;) { 3437 struct dir_ent *dir_ent = dir->list; 3438 3439 for(; dir_ent && strcmp(dir_ent->name, dir_name) != 0; 3440 dir_ent = dir_ent->next); 3441 if(dir_ent == NULL) 3442 break; 3443 ERROR("Source directory entry %s already used! - trying" 3444 " ", dir_name); 3445 if(pass == 1) 3446 basename = dir_name; 3447 else 3448 free(dir_name); 3449 res = asprintf(&dir_name, "%s_%d", basename, pass++); 3450 if(res == -1) 3451 BAD_ERROR("asprintf failed in " 3452 "scan1_encomp_readdir\n"); 3453 ERROR("%s\n", dir_name); 3454 } 3455 return create_dir_entry(dir_name, basename, 3456 strdup(source_path[index ++]), dir); 3457 } 3458 return NULL; 3459 } 3460 3461 3462 struct dir_ent *scan1_single_readdir(struct dir_info *dir) 3463 { 3464 struct dirent *d_name; 3465 int i; 3466 3467 if(dir->count < old_root_entries) { 3468 for(i = 0; i < old_root_entries; i++) { 3469 if(old_root_entry[i].inode.type == SQUASHFS_DIR_TYPE) 3470 dir->directory_count ++; 3471 add_dir_entry2(old_root_entry[i].name, NULL, NULL, NULL, 3472 &old_root_entry[i].inode, dir); 3473 } 3474 } 3475 3476 if((d_name = readdir(dir->linuxdir)) != NULL) { 3477 char *basename = NULL; 3478 char *dir_name = strdup(d_name->d_name); 3479 int pass = 1, res; 3480 3481 for(;;) { 3482 struct dir_ent *dir_ent = dir->list; 3483 3484 for(; dir_ent && strcmp(dir_ent->name, dir_name) != 0; 3485 dir_ent = dir_ent->next); 3486 if(dir_ent == NULL) 3487 break; 3488 ERROR("Source directory entry %s already used! - trying" 3489 " ", dir_name); 3490 if (pass == 1) 3491 basename = dir_name; 3492 else 3493 free(dir_name); 3494 res = asprintf(&dir_name, "%s_%d", d_name->d_name, pass++); 3495 if(res == -1) 3496 BAD_ERROR("asprintf failed in " 3497 "scan1_single_readdir\n"); 3498 ERROR("%s\n", dir_name); 3499 } 3500 return create_dir_entry(dir_name, basename, NULL, dir); 3501 } 3502 3503 return NULL; 3504 } 3505 3506 3507 struct dir_ent *scan1_readdir(struct dir_info *dir) 3508 { 3509 struct dirent *d_name = readdir(dir->linuxdir); 3510 3511 return d_name ? 3512 create_dir_entry(strdup(d_name->d_name), NULL, NULL, dir) : 3513 NULL; 3514 } 3515 3516 3517 void scan1_freedir(struct dir_info *dir) 3518 { 3519 if(dir->pathname[0] != '\0') 3520 closedir(dir->linuxdir); 3521 } 3522 3523 3524 struct dir_info *dir_scan1(char *filename, char *subpath, 3525 struct pathnames *paths, 3526 struct dir_ent *(_readdir)(struct dir_info *), int depth) 3527 { 3528 struct dir_info *dir = scan1_opendir(filename, subpath, depth); 3529 struct dir_ent *dir_ent; 3530 3531 if(dir == NULL) { 3532 ERROR_START("Could not open %s", filename); 3533 ERROR_EXIT(", skipping...\n"); 3534 return NULL; 3535 } 3536 3537 while((dir_ent = _readdir(dir))) { 3538 struct dir_info *sub_dir; 3539 struct stat buf; 3540 struct pathnames *new = NULL; 3541 char *filename = pathname(dir_ent); 3542 char *subpath = NULL; 3543 char *dir_name = dir_ent->name; 3544 3545 if(strcmp(dir_name, ".") == 0 || strcmp(dir_name, "..") == 0) { 3546 free_dir_entry(dir_ent); 3547 continue; 3548 } 3549 3550 if(lstat(filename, &buf) == -1) { 3551 ERROR_START("Cannot stat dir/file %s because %s", 3552 filename, strerror(errno)); 3553 ERROR_EXIT(", ignoring\n"); 3554 free_dir_entry(dir_ent); 3555 continue; 3556 } 3557 3558 if((buf.st_mode & S_IFMT) != S_IFREG && 3559 (buf.st_mode & S_IFMT) != S_IFDIR && 3560 (buf.st_mode & S_IFMT) != S_IFLNK && 3561 (buf.st_mode & S_IFMT) != S_IFCHR && 3562 (buf.st_mode & S_IFMT) != S_IFBLK && 3563 (buf.st_mode & S_IFMT) != S_IFIFO && 3564 (buf.st_mode & S_IFMT) != S_IFSOCK) { 3565 ERROR_START("File %s has unrecognised filetype %d", 3566 filename, buf.st_mode & S_IFMT); 3567 ERROR_EXIT(", ignoring\n"); 3568 free_dir_entry(dir_ent); 3569 continue; 3570 } 3571 3572 if((old_exclude && old_excluded(filename, &buf)) || 3573 (!old_exclude && excluded(dir_name, paths, &new))) { 3574 add_excluded(dir); 3575 free_dir_entry(dir_ent); 3576 continue; 3577 } 3578 3579 if(exclude_actions()) { 3580 subpath = subpathname(dir_ent); 3581 3582 if(eval_exclude_actions(dir_name, filename, subpath, 3583 &buf, depth, dir_ent)) { 3584 add_excluded(dir); 3585 free_dir_entry(dir_ent); 3586 continue; 3587 } 3588 } 3589 3590 switch(buf.st_mode & S_IFMT) { 3591 case S_IFDIR: 3592 if(subpath == NULL) 3593 subpath = subpathname(dir_ent); 3594 3595 sub_dir = dir_scan1(filename, subpath, new, 3596 scan1_readdir, depth + 1); 3597 if(sub_dir) { 3598 dir->directory_count ++; 3599 add_dir_entry(dir_ent, sub_dir, 3600 lookup_inode(&buf)); 3601 } else 3602 free_dir_entry(dir_ent); 3603 break; 3604 case S_IFLNK: { 3605 int byte; 3606 static char buff[65536]; /* overflow safe */ 3607 3608 byte = readlink(filename, buff, 65536); 3609 if(byte == -1) { 3610 ERROR_START("Failed to read symlink %s", 3611 filename); 3612 ERROR_EXIT(", ignoring\n"); 3613 } else if(byte == 65536) { 3614 ERROR_START("Symlink %s is greater than 65536 " 3615 "bytes!", filename); 3616 ERROR_EXIT(", ignoring\n"); 3617 } else { 3618 /* readlink doesn't 0 terminate the returned 3619 * path */ 3620 buff[byte] = '\0'; 3621 add_dir_entry(dir_ent, NULL, lookup_inode3(&buf, 3622 0, 0, buff, byte + 1)); 3623 } 3624 break; 3625 } 3626 default: 3627 add_dir_entry(dir_ent, NULL, lookup_inode(&buf)); 3628 } 3629 3630 free(new); 3631 } 3632 3633 scan1_freedir(dir); 3634 3635 return dir; 3636 } 3637 3638 3639 /* 3640 * dir_scan2 routines... 3641 * This processes most actions and any pseudo files 3642 */ 3643 struct dir_ent *scan2_readdir(struct dir_info *dir, struct dir_ent *dir_ent) 3644 { 3645 if (dir_ent == NULL) 3646 dir_ent = dir->list; 3647 else 3648 dir_ent = dir_ent->next; 3649 3650 for(; dir_ent && dir_ent->inode->root_entry; dir_ent = dir_ent->next); 3651 3652 return dir_ent; 3653 } 3654 3655 3656 struct dir_ent *scan2_lookup(struct dir_info *dir, char *name) 3657 { 3658 struct dir_ent *dir_ent = dir->list; 3659 3660 for(; dir_ent && strcmp(dir_ent->name, name) != 0; 3661 dir_ent = dir_ent->next); 3662 3663 return dir_ent; 3664 } 3665 3666 3667 void dir_scan2(struct dir_info *dir, struct pseudo *pseudo) 3668 { 3669 struct dir_ent *dir_ent = NULL; 3670 struct pseudo_entry *pseudo_ent; 3671 struct stat buf; 3672 static int pseudo_ino = 1; 3673 3674 while((dir_ent = scan2_readdir(dir, dir_ent)) != NULL) { 3675 struct inode_info *inode_info = dir_ent->inode; 3676 struct stat *buf = &inode_info->buf; 3677 char *name = dir_ent->name; 3678 3679 eval_actions(root_dir, dir_ent); 3680 3681 if((buf->st_mode & S_IFMT) == S_IFDIR) 3682 dir_scan2(dir_ent->dir, pseudo_subdir(name, pseudo)); 3683 } 3684 3685 while((pseudo_ent = pseudo_readdir(pseudo)) != NULL) { 3686 dir_ent = scan2_lookup(dir, pseudo_ent->name); 3687 if(pseudo_ent->dev->type == 'm') { 3688 struct stat *buf; 3689 if(dir_ent == NULL) { 3690 ERROR_START("Pseudo modify file \"%s\" does " 3691 "not exist in source filesystem.", 3692 pseudo_ent->pathname); 3693 ERROR_EXIT(" Ignoring.\n"); 3694 continue; 3695 } 3696 if(dir_ent->inode->root_entry) { 3697 ERROR_START("Pseudo modify file \"%s\" is a " 3698 "pre-existing file in the filesystem " 3699 "being appended to. It cannot be "\ 3700 "modified.", pseudo_ent->pathname); 3701 ERROR_EXIT(" Ignoring.\n"); 3702 continue; 3703 } 3704 buf = &dir_ent->inode->buf; 3705 buf->st_mode = (buf->st_mode & S_IFMT) | 3706 pseudo_ent->dev->mode; 3707 buf->st_uid = pseudo_ent->dev->uid; 3708 buf->st_gid = pseudo_ent->dev->gid; 3709 continue; 3710 } 3711 3712 if(dir_ent) { 3713 if(dir_ent->inode->root_entry) { 3714 ERROR_START("Pseudo file \"%s\" is a " 3715 "pre-existing file in the filesystem " 3716 "being appended to.", 3717 pseudo_ent->pathname); 3718 ERROR_EXIT(" Ignoring.\n"); 3719 } else { 3720 ERROR_START("Pseudo file \"%s\" exists in " 3721 "source filesystem \"%s\".", 3722 pseudo_ent->pathname, 3723 pathname(dir_ent)); 3724 ERROR_EXIT("\nIgnoring, exclude it (-e/-ef) to " 3725 "override.\n"); 3726 } 3727 continue; 3728 } 3729 3730 memset(&buf, 0, sizeof(buf)); 3731 buf.st_mode = pseudo_ent->dev->mode; 3732 buf.st_uid = pseudo_ent->dev->uid; 3733 buf.st_gid = pseudo_ent->dev->gid; 3734 buf.st_rdev = makedev(pseudo_ent->dev->major, 3735 pseudo_ent->dev->minor); 3736 buf.st_mtime = time(NULL); 3737 buf.st_ino = pseudo_ino ++; 3738 3739 if(pseudo_ent->dev->type == 'd') { 3740 struct dir_ent *dir_ent = 3741 create_dir_entry(pseudo_ent->name, NULL, 3742 pseudo_ent->pathname, dir); 3743 char *subpath = strdup(subpathname(dir_ent)); 3744 struct dir_info *sub_dir = scan1_opendir("", subpath, 3745 dir->depth + 1); 3746 if(sub_dir == NULL) { 3747 ERROR_START("Could not create pseudo directory " 3748 "\"%s\"", pseudo_ent->pathname); 3749 ERROR_EXIT(", skipping...\n"); 3750 free(subpath); 3751 pseudo_ino --; 3752 continue; 3753 } 3754 dir_scan2(sub_dir, pseudo_ent->pseudo); 3755 dir->directory_count ++; 3756 add_dir_entry(dir_ent, sub_dir, 3757 lookup_inode2(&buf, PSEUDO_FILE_OTHER, 0)); 3758 } else if(pseudo_ent->dev->type == 'f') { 3759 add_dir_entry2(pseudo_ent->name, NULL, 3760 pseudo_ent->pathname, NULL, 3761 lookup_inode2(&buf, PSEUDO_FILE_PROCESS, 3762 pseudo_ent->dev->pseudo_id), dir); 3763 } else { 3764 add_dir_entry2(pseudo_ent->name, NULL, 3765 pseudo_ent->pathname, NULL, 3766 lookup_inode2(&buf, PSEUDO_FILE_OTHER, 0), dir); 3767 } 3768 } 3769 } 3770 3771 3772 /* 3773 * dir_scan3 routines... 3774 * This processes the move action 3775 */ 3776 void dir_scan3(struct dir_info *dir) 3777 { 3778 struct dir_ent *dir_ent = NULL; 3779 3780 while((dir_ent = scan2_readdir(dir, dir_ent)) != NULL) { 3781 3782 eval_move_actions(root_dir, dir_ent); 3783 3784 if((dir_ent->inode->buf.st_mode & S_IFMT) == S_IFDIR) 3785 dir_scan3(dir_ent->dir); 3786 } 3787 } 3788 3789 3790 /* 3791 * dir_scan4 routines... 3792 * This processes the prune action. This action is designed to do fine 3793 * grained tuning of the in-core directory structure after the exclude, 3794 * move and pseudo actions have been performed. This allows complex 3795 * tests to be performed which are impossible at exclude time (i.e. 3796 * tests which rely on the in-core directory structure) 3797 */ 3798 void free_dir(struct dir_info *dir) 3799 { 3800 struct dir_ent *dir_ent = dir->list; 3801 3802 while(dir_ent) { 3803 struct dir_ent *tmp = dir_ent; 3804 3805 if((dir_ent->inode->buf.st_mode & S_IFMT) == S_IFDIR) 3806 free_dir(dir_ent->dir); 3807 3808 dir_ent = dir_ent->next; 3809 free_dir_entry(tmp); 3810 } 3811 3812 free(dir->pathname); 3813 free(dir->subpath); 3814 free(dir); 3815 } 3816 3817 3818 void dir_scan4(struct dir_info *dir) 3819 { 3820 struct dir_ent *dir_ent = dir->list, *prev = NULL; 3821 3822 while(dir_ent) { 3823 if(dir_ent->inode->root_entry) { 3824 prev = dir_ent; 3825 dir_ent = dir_ent->next; 3826 continue; 3827 } 3828 3829 if((dir_ent->inode->buf.st_mode & S_IFMT) == S_IFDIR) 3830 dir_scan4(dir_ent->dir); 3831 3832 if(eval_prune_actions(root_dir, dir_ent)) { 3833 struct dir_ent *tmp = dir_ent; 3834 3835 if((dir_ent->inode->buf.st_mode & S_IFMT) == S_IFDIR) { 3836 free_dir(dir_ent->dir); 3837 dir->directory_count --; 3838 } 3839 3840 dir->count --; 3841 3842 /* remove dir_ent from list */ 3843 dir_ent = dir_ent->next; 3844 if(prev) 3845 prev->next = dir_ent; 3846 else 3847 dir->list = dir_ent; 3848 3849 /* free it */ 3850 free_dir_entry(tmp); 3851 3852 add_excluded(dir); 3853 continue; 3854 } 3855 3856 prev = dir_ent; 3857 dir_ent = dir_ent->next; 3858 } 3859 } 3860 3861 3862 /* 3863 * dir_scan5 routines... 3864 * This processes the empty action. This action has to be processed after 3865 * all other actions because the previous exclude and move actions and the 3866 * pseudo actions affect whether a directory is empty 3867 */ 3868 void dir_scan5(struct dir_info *dir) 3869 { 3870 struct dir_ent *dir_ent = dir->list, *prev = NULL; 3871 3872 while(dir_ent) { 3873 if(dir_ent->inode->root_entry) { 3874 prev = dir_ent; 3875 dir_ent = dir_ent->next; 3876 continue; 3877 } 3878 3879 if((dir_ent->inode->buf.st_mode & S_IFMT) == S_IFDIR) { 3880 dir_scan5(dir_ent->dir); 3881 3882 if(eval_empty_actions(root_dir, dir_ent)) { 3883 struct dir_ent *tmp = dir_ent; 3884 3885 /* 3886 * delete sub-directory, this is by definition 3887 * empty 3888 */ 3889 free(dir_ent->dir->pathname); 3890 free(dir_ent->dir->subpath); 3891 free(dir_ent->dir); 3892 3893 /* remove dir_ent from list */ 3894 dir_ent = dir_ent->next; 3895 if(prev) 3896 prev->next = dir_ent; 3897 else 3898 dir->list = dir_ent; 3899 3900 /* free it */ 3901 free_dir_entry(tmp); 3902 3903 /* update counts */ 3904 dir->directory_count --; 3905 dir->count --; 3906 add_excluded(dir); 3907 continue; 3908 } 3909 } 3910 3911 prev = dir_ent; 3912 dir_ent = dir_ent->next; 3913 } 3914 } 3915 3916 3917 /* 3918 * dir_scan6 routines... 3919 * This sorts every directory and computes the inode numbers 3920 */ 3921 3922 /* 3923 * Bottom up linked list merge sort. 3924 * 3925 * Qsort and other O(n log n) algorithms work well with arrays but not 3926 * linked lists. Merge sort another O(n log n) sort algorithm on the other hand 3927 * is not ideal for arrays (as it needs an additonal n storage locations 3928 * as sorting is not done in place), but it is ideal for linked lists because 3929 * it doesn't require any extra storage, 3930 */ 3931 void sort_directory(struct dir_info *dir) 3932 { 3933 struct dir_ent *cur, *l1, *l2, *next; 3934 int len1, len2, stride = 1; 3935 3936 if(dir->list == NULL || dir->count < 2) 3937 return; 3938 3939 /* 3940 * We can consider our linked-list to be made up of stride length 3941 * sublists. Eacn iteration around this loop merges adjacent 3942 * stride length sublists into larger 2*stride sublists. We stop 3943 * when stride becomes equal to the entire list. 3944 * 3945 * Initially stride = 1 (by definition a sublist of 1 is sorted), and 3946 * these 1 element sublists are merged into 2 element sublists, which 3947 * are then merged into 4 element sublists and so on. 3948 */ 3949 do { 3950 l2 = dir->list; /* head of current linked list */ 3951 cur = NULL; /* empty output list */ 3952 3953 /* 3954 * Iterate through the linked list, merging adjacent sublists. 3955 * On each interation l2 points to the next sublist pair to be 3956 * merged (if there's only one sublist left this is simply added 3957 * to the output list) 3958 */ 3959 while(l2) { 3960 l1 = l2; 3961 for(len1 = 0; l2 && len1 < stride; len1 ++, l2 = l2->next); 3962 len2 = stride; 3963 3964 /* 3965 * l1 points to first sublist. 3966 * l2 points to second sublist. 3967 * Merge them onto the output list 3968 */ 3969 while(len1 && l2 && len2) { 3970 if(strcmp(l1->name, l2->name) <= 0) { 3971 next = l1; 3972 l1 = l1->next; 3973 len1 --; 3974 } else { 3975 next = l2; 3976 l2 = l2->next; 3977 len2 --; 3978 } 3979 3980 if(cur) { 3981 cur->next = next; 3982 cur = next; 3983 } else 3984 dir->list = cur = next; 3985 } 3986 /* 3987 * One sublist is now empty, copy the other one onto the 3988 * output list 3989 */ 3990 for(; len1; len1 --, l1 = l1->next) { 3991 if(cur) { 3992 cur->next = l1; 3993 cur = l1; 3994 } else 3995 dir->list = cur = l1; 3996 } 3997 for(; l2 && len2; len2 --, l2 = l2->next) { 3998 if(cur) { 3999 cur->next = l2; 4000 cur = l2; 4001 } else 4002 dir->list = cur = l2; 4003 } 4004 } 4005 cur->next = NULL; 4006 stride = stride << 1; 4007 } while(stride < dir->count); 4008 } 4009 4010 4011 void dir_scan6(struct dir_info *dir) 4012 { 4013 struct dir_ent *dir_ent; 4014 unsigned int byte_count = 0; 4015 4016 sort_directory(dir); 4017 4018 for(dir_ent = dir->list; dir_ent; dir_ent = dir_ent->next) { 4019 byte_count += strlen(dir_ent->name) + 4020 sizeof(struct squashfs_dir_entry); 4021 4022 if(dir_ent->inode->root_entry) 4023 continue; 4024 4025 alloc_inode_no(dir_ent->inode, 0); 4026 4027 if((dir_ent->inode->buf.st_mode & S_IFMT) == S_IFDIR) 4028 dir_scan6(dir_ent->dir); 4029 } 4030 4031 if((dir->count < 257 && byte_count < SQUASHFS_METADATA_SIZE)) 4032 dir->dir_is_ldir = FALSE; 4033 } 4034 4035 4036 /* 4037 * dir_scan6 routines... 4038 * This generates the filesystem metadata and writes it out to the destination 4039 */ 4040 void scan7_init_dir(struct directory *dir) 4041 { 4042 dir->buff = malloc(SQUASHFS_METADATA_SIZE); 4043 if(dir->buff == NULL) 4044 MEM_ERROR(); 4045 4046 dir->size = SQUASHFS_METADATA_SIZE; 4047 dir->p = dir->index_count_p = dir->buff; 4048 dir->entry_count = 256; 4049 dir->entry_count_p = NULL; 4050 dir->index = NULL; 4051 dir->i_count = dir->i_size = 0; 4052 } 4053 4054 4055 struct dir_ent *scan7_readdir(struct directory *dir, struct dir_info *dir_info, 4056 struct dir_ent *dir_ent) 4057 { 4058 if (dir_ent == NULL) 4059 dir_ent = dir_info->list; 4060 else 4061 dir_ent = dir_ent->next; 4062 4063 for(; dir_ent && dir_ent->inode->root_entry; dir_ent = dir_ent->next) 4064 add_dir(dir_ent->inode->inode, dir_ent->inode->inode_number, 4065 dir_ent->name, dir_ent->inode->type, dir); 4066 4067 return dir_ent; 4068 } 4069 4070 4071 void scan7_freedir(struct directory *dir) 4072 { 4073 if(dir->index) 4074 free(dir->index); 4075 free(dir->buff); 4076 } 4077 4078 4079 void dir_scan7(squashfs_inode *inode, struct dir_info *dir_info) 4080 { 4081 int squashfs_type; 4082 int duplicate_file; 4083 struct directory dir; 4084 struct dir_ent *dir_ent = NULL; 4085 4086 scan7_init_dir(&dir); 4087 4088 while((dir_ent = scan7_readdir(&dir, dir_info, dir_ent)) != NULL) { 4089 struct stat *buf = &dir_ent->inode->buf; 4090 4091 update_info(dir_ent); 4092 4093 if(dir_ent->inode->inode == SQUASHFS_INVALID_BLK) { 4094 switch(buf->st_mode & S_IFMT) { 4095 case S_IFREG: 4096 squashfs_type = SQUASHFS_FILE_TYPE; 4097 write_file(inode, dir_ent, 4098 &duplicate_file); 4099 INFO("file %s, uncompressed size %lld " 4100 "bytes %s\n", 4101 subpathname(dir_ent), 4102 (long long) buf->st_size, 4103 duplicate_file ? "DUPLICATE" : 4104 ""); 4105 break; 4106 4107 case S_IFDIR: 4108 squashfs_type = SQUASHFS_DIR_TYPE; 4109 dir_scan7(inode, dir_ent->dir); 4110 break; 4111 4112 case S_IFLNK: 4113 squashfs_type = SQUASHFS_SYMLINK_TYPE; 4114 create_inode(inode, NULL, dir_ent, 4115 squashfs_type, 0, 0, 0, NULL, 4116 NULL, NULL, 0); 4117 INFO("symbolic link %s inode 0x%llx\n", 4118 subpathname(dir_ent), *inode); 4119 sym_count ++; 4120 break; 4121 4122 case S_IFCHR: 4123 squashfs_type = SQUASHFS_CHRDEV_TYPE; 4124 create_inode(inode, NULL, dir_ent, 4125 squashfs_type, 0, 0, 0, NULL, 4126 NULL, NULL, 0); 4127 INFO("character device %s inode 0x%llx" 4128 "\n", subpathname(dir_ent), 4129 *inode); 4130 dev_count ++; 4131 break; 4132 4133 case S_IFBLK: 4134 squashfs_type = SQUASHFS_BLKDEV_TYPE; 4135 create_inode(inode, NULL, dir_ent, 4136 squashfs_type, 0, 0, 0, NULL, 4137 NULL, NULL, 0); 4138 INFO("block device %s inode 0x%llx\n", 4139 subpathname(dir_ent), *inode); 4140 dev_count ++; 4141 break; 4142 4143 case S_IFIFO: 4144 squashfs_type = SQUASHFS_FIFO_TYPE; 4145 create_inode(inode, NULL, dir_ent, 4146 squashfs_type, 0, 0, 0, NULL, 4147 NULL, NULL, 0); 4148 INFO("fifo %s inode 0x%llx\n", 4149 subpathname(dir_ent), *inode); 4150 fifo_count ++; 4151 break; 4152 4153 case S_IFSOCK: 4154 squashfs_type = SQUASHFS_SOCKET_TYPE; 4155 create_inode(inode, NULL, dir_ent, 4156 squashfs_type, 0, 0, 0, NULL, 4157 NULL, NULL, 0); 4158 INFO("unix domain socket %s inode " 4159 "0x%llx\n", 4160 subpathname(dir_ent), *inode); 4161 sock_count ++; 4162 break; 4163 4164 default: 4165 BAD_ERROR("%s unrecognised file type, " 4166 "mode is %x\n", 4167 subpathname(dir_ent), 4168 buf->st_mode); 4169 } 4170 dir_ent->inode->inode = *inode; 4171 dir_ent->inode->type = squashfs_type; 4172 } else { 4173 *inode = dir_ent->inode->inode; 4174 squashfs_type = dir_ent->inode->type; 4175 switch(squashfs_type) { 4176 case SQUASHFS_FILE_TYPE: 4177 if(!sorted) 4178 INFO("file %s, uncompressed " 4179 "size %lld bytes LINK" 4180 "\n", 4181 subpathname(dir_ent), 4182 (long long) 4183 buf->st_size); 4184 break; 4185 case SQUASHFS_SYMLINK_TYPE: 4186 INFO("symbolic link %s inode 0x%llx " 4187 "LINK\n", subpathname(dir_ent), 4188 *inode); 4189 break; 4190 case SQUASHFS_CHRDEV_TYPE: 4191 INFO("character device %s inode 0x%llx " 4192 "LINK\n", subpathname(dir_ent), 4193 *inode); 4194 break; 4195 case SQUASHFS_BLKDEV_TYPE: 4196 INFO("block device %s inode 0x%llx " 4197 "LINK\n", subpathname(dir_ent), 4198 *inode); 4199 break; 4200 case SQUASHFS_FIFO_TYPE: 4201 INFO("fifo %s inode 0x%llx LINK\n", 4202 subpathname(dir_ent), *inode); 4203 break; 4204 case SQUASHFS_SOCKET_TYPE: 4205 INFO("unix domain socket %s inode " 4206 "0x%llx LINK\n", 4207 subpathname(dir_ent), *inode); 4208 break; 4209 } 4210 } 4211 4212 add_dir(*inode, get_inode_no(dir_ent->inode), dir_ent->name, 4213 squashfs_type, &dir); 4214 } 4215 4216 write_dir(inode, dir_info, &dir); 4217 INFO("directory %s inode 0x%llx\n", subpathname(dir_info->dir_ent), 4218 *inode); 4219 4220 scan7_freedir(&dir); 4221 } 4222 4223 4224 unsigned int slog(unsigned int block) 4225 { 4226 int i; 4227 4228 for(i = 12; i <= 20; i++) 4229 if(block == (1 << i)) 4230 return i; 4231 return 0; 4232 } 4233 4234 4235 int old_excluded(char *filename, struct stat *buf) 4236 { 4237 int i; 4238 4239 for(i = 0; i < exclude; i++) 4240 if((exclude_paths[i].st_dev == buf->st_dev) && 4241 (exclude_paths[i].st_ino == buf->st_ino)) 4242 return TRUE; 4243 return FALSE; 4244 } 4245 4246 4247 #define ADD_ENTRY(buf) \ 4248 if(exclude % EXCLUDE_SIZE == 0) { \ 4249 exclude_paths = realloc(exclude_paths, (exclude + EXCLUDE_SIZE) \ 4250 * sizeof(struct exclude_info)); \ 4251 if(exclude_paths == NULL) \ 4252 MEM_ERROR(); \ 4253 } \ 4254 exclude_paths[exclude].st_dev = buf.st_dev; \ 4255 exclude_paths[exclude++].st_ino = buf.st_ino; 4256 int old_add_exclude(char *path) 4257 { 4258 int i; 4259 char *filename; 4260 struct stat buf; 4261 4262 if(path[0] == '/' || strncmp(path, "./", 2) == 0 || 4263 strncmp(path, "../", 3) == 0) { 4264 if(lstat(path, &buf) == -1) { 4265 ERROR_START("Cannot stat exclude dir/file %s because " 4266 "%s", path, strerror(errno)); 4267 ERROR_EXIT(", ignoring\n"); 4268 return TRUE; 4269 } 4270 ADD_ENTRY(buf); 4271 return TRUE; 4272 } 4273 4274 for(i = 0; i < source; i++) { 4275 int res = asprintf(&filename, "%s/%s", source_path[i], path); 4276 if(res == -1) 4277 BAD_ERROR("asprintf failed in old_add_exclude\n"); 4278 if(lstat(filename, &buf) == -1) { 4279 if(!(errno == ENOENT || errno == ENOTDIR)) { 4280 ERROR_START("Cannot stat exclude dir/file %s " 4281 "because %s", filename, strerror(errno)); 4282 ERROR_EXIT(", ignoring\n"); 4283 } 4284 free(filename); 4285 continue; 4286 } 4287 free(filename); 4288 ADD_ENTRY(buf); 4289 } 4290 return TRUE; 4291 } 4292 4293 4294 void add_old_root_entry(char *name, squashfs_inode inode, int inode_number, 4295 int type) 4296 { 4297 old_root_entry = realloc(old_root_entry, 4298 sizeof(struct old_root_entry_info) * (old_root_entries + 1)); 4299 if(old_root_entry == NULL) 4300 MEM_ERROR(); 4301 4302 old_root_entry[old_root_entries].name = strdup(name); 4303 old_root_entry[old_root_entries].inode.inode = inode; 4304 old_root_entry[old_root_entries].inode.inode_number = inode_number; 4305 old_root_entry[old_root_entries].inode.type = type; 4306 old_root_entry[old_root_entries++].inode.root_entry = TRUE; 4307 } 4308 4309 4310 void initialise_threads(int readq, int fragq, int bwriteq, int fwriteq, 4311 int freelst, char *destination_file) 4312 { 4313 int i; 4314 sigset_t sigmask, old_mask; 4315 int total_mem = readq; 4316 int reader_size; 4317 int fragment_size; 4318 int fwriter_size; 4319 /* 4320 * bwriter_size is global because it is needed in 4321 * write_file_blocks_dup() 4322 */ 4323 4324 /* 4325 * Never allow the total size of the queues to be larger than 4326 * physical memory 4327 * 4328 * When adding together the possibly user supplied values, make 4329 * sure they've not been deliberately contrived to overflow an int 4330 */ 4331 if(add_overflow(total_mem, fragq)) 4332 BAD_ERROR("Queue sizes rediculously too large\n"); 4333 total_mem += fragq; 4334 if(add_overflow(total_mem, bwriteq)) 4335 BAD_ERROR("Queue sizes rediculously too large\n"); 4336 total_mem += bwriteq; 4337 if(add_overflow(total_mem, fwriteq)) 4338 BAD_ERROR("Queue sizes rediculously too large\n"); 4339 total_mem += fwriteq; 4340 4341 check_usable_phys_mem(total_mem); 4342 4343 /* 4344 * convert from queue size in Mbytes to queue size in 4345 * blocks. 4346 * 4347 * This isn't going to overflow an int unless there exists 4348 * systems with more than 8 Petabytes of RAM! 4349 */ 4350 reader_size = readq << (20 - block_log); 4351 fragment_size = fragq << (20 - block_log); 4352 bwriter_size = bwriteq << (20 - block_log); 4353 fwriter_size = fwriteq << (20 - block_log); 4354 4355 /* 4356 * setup signal handlers for the main thread, these cleanup 4357 * deleting the destination file, if appending the 4358 * handlers for SIGTERM and SIGINT will be replaced with handlers 4359 * allowing the user to press ^C twice to restore the existing 4360 * filesystem. 4361 * 4362 * SIGUSR1 is an internal signal, which is used by the sub-threads 4363 * to tell the main thread to terminate, deleting the destination file, 4364 * or if necessary restoring the filesystem on appending 4365 */ 4366 signal(SIGTERM, sighandler); 4367 signal(SIGINT, sighandler); 4368 signal(SIGUSR1, sighandler); 4369 4370 /* block SIGQUIT and SIGHUP, these are handled by the info thread */ 4371 sigemptyset(&sigmask); 4372 sigaddset(&sigmask, SIGQUIT); 4373 sigaddset(&sigmask, SIGHUP); 4374 sigaddset(&sigmask, SIGALRM); 4375 if(pthread_sigmask(SIG_BLOCK, &sigmask, NULL) == -1) 4376 BAD_ERROR("Failed to set signal mask in intialise_threads\n"); 4377 4378 /* 4379 * temporarily block these signals, so the created sub-threads 4380 * will ignore them, ensuring the main thread handles them 4381 */ 4382 sigemptyset(&sigmask); 4383 sigaddset(&sigmask, SIGINT); 4384 sigaddset(&sigmask, SIGTERM); 4385 sigaddset(&sigmask, SIGUSR1); 4386 if(pthread_sigmask(SIG_BLOCK, &sigmask, &old_mask) == -1) 4387 BAD_ERROR("Failed to set signal mask in intialise_threads\n"); 4388 4389 if(processors == -1) { 4390 #ifndef linux 4391 int mib[2]; 4392 size_t len = sizeof(processors); 4393 4394 mib[0] = CTL_HW; 4395 #ifdef HW_AVAILCPU 4396 mib[1] = HW_AVAILCPU; 4397 #else 4398 mib[1] = HW_NCPU; 4399 #endif 4400 4401 if(sysctl(mib, 2, &processors, &len, NULL, 0) == -1) { 4402 ERROR_START("Failed to get number of available " 4403 "processors."); 4404 ERROR_EXIT(" Defaulting to 1\n"); 4405 processors = 1; 4406 } 4407 #else 4408 processors = sysconf(_SC_NPROCESSORS_ONLN); 4409 #endif 4410 } 4411 4412 if(multiply_overflow(processors, 3) || 4413 multiply_overflow(processors * 3, sizeof(pthread_t))) 4414 BAD_ERROR("Processors too large\n"); 4415 4416 deflator_thread = malloc(processors * 3 * sizeof(pthread_t)); 4417 if(deflator_thread == NULL) 4418 MEM_ERROR(); 4419 4420 frag_deflator_thread = &deflator_thread[processors]; 4421 frag_thread = &frag_deflator_thread[processors]; 4422 4423 to_reader = queue_init(1); 4424 to_deflate = queue_init(reader_size); 4425 to_process_frag = queue_init(reader_size); 4426 to_writer = queue_init(bwriter_size + fwriter_size); 4427 from_writer = queue_init(1); 4428 to_frag = queue_init(fragment_size); 4429 locked_fragment = queue_init(fragment_size); 4430 to_main = seq_queue_init(); 4431 reader_buffer = cache_init(block_size, reader_size, 0, 0); 4432 bwriter_buffer = cache_init(block_size, bwriter_size, 1, freelst); 4433 fwriter_buffer = cache_init(block_size, fwriter_size, 1, freelst); 4434 fragment_buffer = cache_init(block_size, fragment_size, 1, 0); 4435 reserve_cache = cache_init(block_size, processors + 1, 1, 0); 4436 pthread_create(&reader_thread, NULL, reader, NULL); 4437 pthread_create(&writer_thread, NULL, writer, NULL); 4438 init_progress_bar(); 4439 init_info(); 4440 4441 for(i = 0; i < processors; i++) { 4442 if(pthread_create(&deflator_thread[i], NULL, deflator, NULL)) 4443 BAD_ERROR("Failed to create thread\n"); 4444 if(pthread_create(&frag_deflator_thread[i], NULL, frag_deflator, 4445 NULL) != 0) 4446 BAD_ERROR("Failed to create thread\n"); 4447 if(pthread_create(&frag_thread[i], NULL, frag_thrd, 4448 (void *) destination_file) != 0) 4449 BAD_ERROR("Failed to create thread\n"); 4450 } 4451 4452 main_thread = pthread_self(); 4453 4454 printf("Parallel mksquashfs: Using %d processor%s\n", processors, 4455 processors == 1 ? "" : "s"); 4456 4457 /* Restore the signal mask for the main thread */ 4458 if(pthread_sigmask(SIG_SETMASK, &old_mask, NULL) == -1) 4459 BAD_ERROR("Failed to set signal mask in intialise_threads\n"); 4460 } 4461 4462 4463 long long write_inode_lookup_table() 4464 { 4465 int i, inode_number, lookup_bytes = SQUASHFS_LOOKUP_BYTES(inode_count); 4466 void *it; 4467 4468 if(inode_count == sinode_count) 4469 goto skip_inode_hash_table; 4470 4471 it = realloc(inode_lookup_table, lookup_bytes); 4472 if(it == NULL) 4473 MEM_ERROR(); 4474 inode_lookup_table = it; 4475 4476 for(i = 0; i < INODE_HASH_SIZE; i ++) { 4477 struct inode_info *inode; 4478 4479 for(inode = inode_info[i]; inode; inode = inode->next) { 4480 4481 inode_number = get_inode_no(inode); 4482 4483 /* The empty action will produce orphaned inode 4484 * entries in the inode_info[] table. These 4485 * entries because they are orphaned will not be 4486 * allocated an inode number in dir_scan5(), so 4487 * skip any entries with the default dummy inode 4488 * number of 0 */ 4489 if(inode_number == 0) 4490 continue; 4491 4492 SQUASHFS_SWAP_LONG_LONGS(&inode->inode, 4493 &inode_lookup_table[inode_number - 1], 1); 4494 4495 } 4496 } 4497 4498 skip_inode_hash_table: 4499 return generic_write_table(lookup_bytes, inode_lookup_table, 0, NULL, 4500 noI); 4501 } 4502 4503 4504 char *get_component(char *target, char **targname) 4505 { 4506 char *start; 4507 4508 while(*target == '/') 4509 target ++; 4510 4511 start = target; 4512 while(*target != '/' && *target != '\0') 4513 target ++; 4514 4515 *targname = strndup(start, target - start); 4516 4517 while(*target == '/') 4518 target ++; 4519 4520 return target; 4521 } 4522 4523 4524 void free_path(struct pathname *paths) 4525 { 4526 int i; 4527 4528 for(i = 0; i < paths->names; i++) { 4529 if(paths->name[i].paths) 4530 free_path(paths->name[i].paths); 4531 free(paths->name[i].name); 4532 if(paths->name[i].preg) { 4533 regfree(paths->name[i].preg); 4534 free(paths->name[i].preg); 4535 } 4536 } 4537 4538 free(paths); 4539 } 4540 4541 4542 struct pathname *add_path(struct pathname *paths, char *target, char *alltarget) 4543 { 4544 char *targname; 4545 int i, error; 4546 4547 target = get_component(target, &targname); 4548 4549 if(paths == NULL) { 4550 paths = malloc(sizeof(struct pathname)); 4551 if(paths == NULL) 4552 MEM_ERROR(); 4553 4554 paths->names = 0; 4555 paths->name = NULL; 4556 } 4557 4558 for(i = 0; i < paths->names; i++) 4559 if(strcmp(paths->name[i].name, targname) == 0) 4560 break; 4561 4562 if(i == paths->names) { 4563 /* allocate new name entry */ 4564 paths->names ++; 4565 paths->name = realloc(paths->name, (i + 1) * 4566 sizeof(struct path_entry)); 4567 if(paths->name == NULL) 4568 MEM_ERROR(); 4569 paths->name[i].name = targname; 4570 paths->name[i].paths = NULL; 4571 if(use_regex) { 4572 paths->name[i].preg = malloc(sizeof(regex_t)); 4573 if(paths->name[i].preg == NULL) 4574 MEM_ERROR(); 4575 error = regcomp(paths->name[i].preg, targname, 4576 REG_EXTENDED|REG_NOSUB); 4577 if(error) { 4578 char str[1024]; /* overflow safe */ 4579 4580 regerror(error, paths->name[i].preg, str, 1024); 4581 BAD_ERROR("invalid regex %s in export %s, " 4582 "because %s\n", targname, alltarget, 4583 str); 4584 } 4585 } else 4586 paths->name[i].preg = NULL; 4587 4588 if(target[0] == '\0') 4589 /* at leaf pathname component */ 4590 paths->name[i].paths = NULL; 4591 else 4592 /* recurse adding child components */ 4593 paths->name[i].paths = add_path(NULL, target, 4594 alltarget); 4595 } else { 4596 /* existing matching entry */ 4597 free(targname); 4598 4599 if(paths->name[i].paths == NULL) { 4600 /* No sub-directory which means this is the leaf 4601 * component of a pre-existing exclude which subsumes 4602 * the exclude currently being added, in which case stop 4603 * adding components */ 4604 } else if(target[0] == '\0') { 4605 /* at leaf pathname component and child components exist 4606 * from more specific excludes, delete as they're 4607 * subsumed by this exclude */ 4608 free_path(paths->name[i].paths); 4609 paths->name[i].paths = NULL; 4610 } else 4611 /* recurse adding child components */ 4612 add_path(paths->name[i].paths, target, alltarget); 4613 } 4614 4615 return paths; 4616 } 4617 4618 4619 void add_exclude(char *target) 4620 { 4621 4622 if(target[0] == '/' || strncmp(target, "./", 2) == 0 || 4623 strncmp(target, "../", 3) == 0) 4624 BAD_ERROR("/, ./ and ../ prefixed excludes not supported with " 4625 "-wildcards or -regex options\n"); 4626 else if(strncmp(target, "... ", 4) == 0) 4627 stickypath = add_path(stickypath, target + 4, target + 4); 4628 else 4629 path = add_path(path, target, target); 4630 } 4631 4632 4633 void display_path(int depth, struct pathname *paths) 4634 { 4635 int i, n; 4636 4637 if(paths == NULL) 4638 return; 4639 4640 for(i = 0; i < paths->names; i++) { 4641 for(n = 0; n < depth; n++) 4642 printf("\t"); 4643 printf("%d: %s\n", depth, paths->name[i].name); 4644 display_path(depth + 1, paths->name[i].paths); 4645 } 4646 } 4647 4648 4649 void display_path2(struct pathname *paths, char *string) 4650 { 4651 int i; 4652 char *path; 4653 4654 if(paths == NULL) { 4655 printf("%s\n", string); 4656 return; 4657 } 4658 4659 for(i = 0; i < paths->names; i++) { 4660 int res = asprintf(&path, "%s/%s", string, paths->name[i].name); 4661 if(res == -1) 4662 BAD_ERROR("asprintf failed in display_path2\n"); 4663 display_path2(paths->name[i].paths, path); 4664 free(path); 4665 } 4666 } 4667 4668 4669 struct pathnames *add_subdir(struct pathnames *paths, struct pathname *path) 4670 { 4671 int count = paths == NULL ? 0 : paths->count; 4672 4673 if(count % PATHS_ALLOC_SIZE == 0) { 4674 paths = realloc(paths, sizeof(struct pathnames) + 4675 (count + PATHS_ALLOC_SIZE) * sizeof(struct pathname *)); 4676 if(paths == NULL) 4677 MEM_ERROR(); 4678 } 4679 4680 paths->path[count] = path; 4681 paths->count = count + 1; 4682 return paths; 4683 } 4684 4685 4686 int excluded_match(char *name, struct pathname *path, struct pathnames **new) 4687 { 4688 int i; 4689 4690 for(i = 0; i < path->names; i++) { 4691 int match = use_regex ? 4692 regexec(path->name[i].preg, name, (size_t) 0, 4693 NULL, 0) == 0 : 4694 fnmatch(path->name[i].name, name, 4695 FNM_PATHNAME|FNM_PERIOD|FNM_EXTMATCH) == 0; 4696 4697 if(match) { 4698 if(path->name[i].paths == NULL || new == NULL) 4699 /* match on a leaf component, any subdirectories 4700 * in the filesystem should be excluded */ 4701 return TRUE; 4702 else 4703 /* match on a non-leaf component, add any 4704 * subdirectories to the new set of 4705 * subdirectories to scan for this name */ 4706 *new = add_subdir(*new, path->name[i].paths); 4707 } 4708 } 4709 4710 return FALSE; 4711 } 4712 4713 4714 int excluded(char *name, struct pathnames *paths, struct pathnames **new) 4715 { 4716 int n; 4717 4718 if(stickypath && excluded_match(name, stickypath, NULL)) 4719 return TRUE; 4720 4721 for(n = 0; paths && n < paths->count; n++) { 4722 int res = excluded_match(name, paths->path[n], new); 4723 if(res) { 4724 free(*new); 4725 *new = NULL; 4726 return TRUE; 4727 } 4728 } 4729 4730 /* 4731 * Either: 4732 * - no matching names found, return empty new search set, or 4733 * - one or more matches with sub-directories found (no leaf matches), 4734 * in which case return new search set. 4735 * 4736 * In either case return FALSE as we don't want to exclude this entry 4737 */ 4738 return FALSE; 4739 } 4740 4741 4742 void process_exclude_file(char *argv) 4743 { 4744 FILE *fd; 4745 char buffer[MAX_LINE + 1]; /* overflow safe */ 4746 char *filename; 4747 4748 fd = fopen(argv, "r"); 4749 if(fd == NULL) 4750 BAD_ERROR("Failed to open exclude file \"%s\" because %s\n", 4751 argv, strerror(errno)); 4752 4753 while(fgets(filename = buffer, MAX_LINE + 1, fd) != NULL) { 4754 int len = strlen(filename); 4755 4756 if(len == MAX_LINE && filename[len - 1] != '\n') 4757 /* line too large */ 4758 BAD_ERROR("Line too long when reading " 4759 "exclude file \"%s\", larger than %d " 4760 "bytes\n", argv, MAX_LINE); 4761 4762 /* 4763 * Remove '\n' terminator if it exists (the last line 4764 * in the file may not be '\n' terminated) 4765 */ 4766 if(len && filename[len - 1] == '\n') 4767 filename[len - 1] = '\0'; 4768 4769 /* Skip any leading whitespace */ 4770 while(isspace(*filename)) 4771 filename ++; 4772 4773 /* if comment line, skip */ 4774 if(*filename == '#') 4775 continue; 4776 4777 /* 4778 * check for initial backslash, to accommodate 4779 * filenames with leading space or leading # character 4780 */ 4781 if(*filename == '\\') 4782 filename ++; 4783 4784 /* if line is now empty after skipping characters, skip it */ 4785 if(*filename == '\0') 4786 continue; 4787 4788 if(old_exclude) 4789 old_add_exclude(filename); 4790 else 4791 add_exclude(filename); 4792 } 4793 4794 if(ferror(fd)) 4795 BAD_ERROR("Reading exclude file \"%s\" failed because %s\n", 4796 argv, strerror(errno)); 4797 4798 fclose(fd); 4799 } 4800 4801 /* ANDROID CHANGES START*/ 4802 #ifdef ANDROID 4803 /* 4804 * Return TRUE (don't compress) if the (regular) file is in the 4805 * whitelist. Else return the Global noD value. 4806 * 4807 * Note : These functions are lifted 100% from the existing exclude 4808 * file code. For maintainability, I've kept this code separate from 4809 * the exclude code instead of having common code for both paths. 4810 */ 4811 static int 4812 whitelisted(struct stat *buf) 4813 { 4814 int i; 4815 4816 /* 4817 * only regular files in the whitelist 4818 */ 4819 if (!S_ISREG(buf->st_mode)) 4820 return noD; 4821 for (i = 0; i < whitelist; i++) { 4822 if ((whitelist_paths[i].st_dev == buf->st_dev) && 4823 (whitelist_paths[i].st_ino == buf->st_ino)) { 4824 /* Don't compress */ 4825 whitelisted_count++; 4826 return TRUE; 4827 } 4828 } 4829 return noD; 4830 } 4831 4832 static void 4833 add_whitelist_entry(char *filename, struct stat *buf) 4834 { 4835 if (!S_ISREG(buf->st_mode)) { 4836 BAD_ERROR("Cannot whitelist %s only regular files can be whitelisted", 4837 filename); 4838 } 4839 if (whitelist % WHITELIST_SIZE == 0) { 4840 whitelist_paths = realloc(whitelist_paths, 4841 (whitelist + WHITELIST_SIZE) 4842 * sizeof(struct whitelist_info)); 4843 if (whitelist_paths == NULL) 4844 MEM_ERROR(); 4845 } 4846 whitelist_paths[whitelist].st_dev = buf->st_dev; 4847 whitelist_paths[whitelist++].st_ino = buf->st_ino; 4848 } 4849 4850 static int 4851 add_whitelist(char *path) 4852 { 4853 int i; 4854 char *filename; 4855 struct stat buf; 4856 4857 /* Absolute of (filesystem) relative path */ 4858 if (path[0] == '/' || strncmp(path, "./", 2) == 0 || 4859 strncmp(path, "../", 3) == 0) { 4860 if(lstat(path, &buf) == -1) { 4861 BAD_ERROR("Cannot stat whitelist dir/file %s because " 4862 "%s", path, strerror(errno)); 4863 } 4864 add_whitelist_entry(path, &buf); 4865 return TRUE; 4866 } 4867 4868 /* pathname relative to mksquashfs source dirs */ 4869 for(i = 0; i < source; i++) { 4870 int res = asprintf(&filename, "%s/%s", source_path[i], path); 4871 if(res == -1) 4872 BAD_ERROR("asprintf failed in add_whitelist\n"); 4873 if(lstat(filename, &buf) == -1) { 4874 if(!(errno == ENOENT || errno == ENOTDIR)) { 4875 BAD_ERROR("Cannot stat whitelist dir/file %s " 4876 "because %s", filename, strerror(errno)); 4877 } 4878 free(filename); 4879 continue; 4880 } 4881 add_whitelist_entry(filename, &buf); 4882 free(filename); 4883 } 4884 return TRUE; 4885 } 4886 4887 static void 4888 process_whitelist_file(char *argv) 4889 { 4890 FILE *fd; 4891 char buffer[MAX_LINE + 1]; /* overflow safe */ 4892 char *filename; 4893 4894 fd = fopen(argv, "r"); 4895 if(fd == NULL) 4896 BAD_ERROR("Failed to open whitelist file \"%s\" because %s\n", 4897 argv, strerror(errno)); 4898 4899 while(fgets(filename = buffer, MAX_LINE + 1, fd) != NULL) { 4900 int len = strlen(filename); 4901 4902 if(len == MAX_LINE && filename[len - 1] != '\n') 4903 /* line too large */ 4904 BAD_ERROR("Line too long when reading " 4905 "whitelist file \"%s\", larger than %d " 4906 "bytes\n", argv, MAX_LINE); 4907 4908 /* 4909 * Remove '\n' terminator if it exists (the last line 4910 * in the file may not be '\n' terminated) 4911 */ 4912 if(len && filename[len - 1] == '\n') 4913 filename[len - 1] = '\0'; 4914 4915 /* Skip any leading whitespace */ 4916 while(isspace(*filename)) 4917 filename ++; 4918 4919 /* if comment line, skip */ 4920 if(*filename == '#') 4921 continue; 4922 4923 /* 4924 * check for initial backslash, to accommodate 4925 * filenames with leading space or leading # character 4926 */ 4927 if(*filename == '\\') 4928 filename ++; 4929 4930 /* if line is now empty after skipping characters, skip it */ 4931 if(*filename == '\0') 4932 continue; 4933 4934 add_whitelist(filename); 4935 } 4936 4937 if(ferror(fd)) 4938 BAD_ERROR("Reading whitelist file \"%s\" failed because %s\n", 4939 argv, strerror(errno)); 4940 4941 fclose(fd); 4942 } 4943 #endif 4944 /* ANDROID CHANGES END */ 4945 4946 #define RECOVER_ID "Squashfs recovery file v1.0\n" 4947 #define RECOVER_ID_SIZE 28 4948 4949 void write_recovery_data(struct squashfs_super_block *sBlk) 4950 { 4951 int res, recoverfd, bytes = sBlk->bytes_used - sBlk->inode_table_start; 4952 pid_t pid = getpid(); 4953 char *metadata; 4954 char header[] = RECOVER_ID; 4955 4956 if(recover == FALSE) { 4957 printf("No recovery data option specified.\n"); 4958 printf("Skipping saving recovery file.\n\n"); 4959 return; 4960 } 4961 4962 metadata = malloc(bytes); 4963 if(metadata == NULL) 4964 MEM_ERROR(); 4965 4966 res = read_fs_bytes(fd, sBlk->inode_table_start, bytes, metadata); 4967 if(res == 0) { 4968 ERROR("Failed to read append filesystem metadata\n"); 4969 BAD_ERROR("Filesystem corrupted?\n"); 4970 } 4971 4972 res = asprintf(&recovery_file, "squashfs_recovery_%s_%d", 4973 getbase(destination_file), pid); 4974 if(res == -1) 4975 MEM_ERROR(); 4976 4977 recoverfd = open(recovery_file, O_CREAT | O_TRUNC | O_RDWR, S_IRWXU); 4978 if(recoverfd == -1) 4979 BAD_ERROR("Failed to create recovery file, because %s. " 4980 "Aborting\n", strerror(errno)); 4981 4982 if(write_bytes(recoverfd, header, RECOVER_ID_SIZE) == -1) 4983 BAD_ERROR("Failed to write recovery file, because %s\n", 4984 strerror(errno)); 4985 4986 if(write_bytes(recoverfd, sBlk, sizeof(struct squashfs_super_block)) == -1) 4987 BAD_ERROR("Failed to write recovery file, because %s\n", 4988 strerror(errno)); 4989 4990 if(write_bytes(recoverfd, metadata, bytes) == -1) 4991 BAD_ERROR("Failed to write recovery file, because %s\n", 4992 strerror(errno)); 4993 4994 close(recoverfd); 4995 free(metadata); 4996 4997 printf("Recovery file \"%s\" written\n", recovery_file); 4998 printf("If Mksquashfs aborts abnormally (i.e. power failure), run\n"); 4999 printf("mksquashfs dummy %s -recover %s\n", destination_file, 5000 recovery_file); 5001 printf("to restore filesystem\n\n"); 5002 } 5003 5004 5005 void read_recovery_data(char *recovery_file, char *destination_file) 5006 { 5007 int fd, recoverfd, bytes; 5008 struct squashfs_super_block orig_sBlk, sBlk; 5009 char *metadata; 5010 int res; 5011 struct stat buf; 5012 char header[] = RECOVER_ID; 5013 char header2[RECOVER_ID_SIZE]; 5014 5015 recoverfd = open(recovery_file, O_RDONLY); 5016 if(recoverfd == -1) 5017 BAD_ERROR("Failed to open recovery file because %s\n", 5018 strerror(errno)); 5019 5020 if(stat(destination_file, &buf) == -1) 5021 BAD_ERROR("Failed to stat destination file, because %s\n", 5022 strerror(errno)); 5023 5024 fd = open(destination_file, O_RDWR); 5025 if(fd == -1) 5026 BAD_ERROR("Failed to open destination file because %s\n", 5027 strerror(errno)); 5028 5029 res = read_bytes(recoverfd, header2, RECOVER_ID_SIZE); 5030 if(res == -1) 5031 BAD_ERROR("Failed to read recovery file, because %s\n", 5032 strerror(errno)); 5033 if(res < RECOVER_ID_SIZE) 5034 BAD_ERROR("Recovery file appears to be truncated\n"); 5035 if(strncmp(header, header2, RECOVER_ID_SIZE) !=0 ) 5036 BAD_ERROR("Not a recovery file\n"); 5037 5038 res = read_bytes(recoverfd, &sBlk, sizeof(struct squashfs_super_block)); 5039 if(res == -1) 5040 BAD_ERROR("Failed to read recovery file, because %s\n", 5041 strerror(errno)); 5042 if(res < sizeof(struct squashfs_super_block)) 5043 BAD_ERROR("Recovery file appears to be truncated\n"); 5044 5045 res = read_fs_bytes(fd, 0, sizeof(struct squashfs_super_block), &orig_sBlk); 5046 if(res == 0) { 5047 ERROR("Failed to read superblock from output filesystem\n"); 5048 BAD_ERROR("Output filesystem is empty!\n"); 5049 } 5050 5051 if(memcmp(((char *) &sBlk) + 4, ((char *) &orig_sBlk) + 4, 5052 sizeof(struct squashfs_super_block) - 4) != 0) 5053 BAD_ERROR("Recovery file and destination file do not seem to " 5054 "match\n"); 5055 5056 bytes = sBlk.bytes_used - sBlk.inode_table_start; 5057 5058 metadata = malloc(bytes); 5059 if(metadata == NULL) 5060 MEM_ERROR(); 5061 5062 res = read_bytes(recoverfd, metadata, bytes); 5063 if(res == -1) 5064 BAD_ERROR("Failed to read recovery file, because %s\n", 5065 strerror(errno)); 5066 if(res < bytes) 5067 BAD_ERROR("Recovery file appears to be truncated\n"); 5068 5069 write_destination(fd, 0, sizeof(struct squashfs_super_block), &sBlk); 5070 5071 write_destination(fd, sBlk.inode_table_start, bytes, metadata); 5072 5073 close(recoverfd); 5074 close(fd); 5075 5076 printf("Successfully wrote recovery file \"%s\". Exiting\n", 5077 recovery_file); 5078 5079 exit(0); 5080 } 5081 5082 5083 void write_filesystem_tables(struct squashfs_super_block *sBlk, int nopad) 5084 { 5085 int i; 5086 5087 sBlk->fragments = fragments; 5088 sBlk->no_ids = id_count; 5089 sBlk->inode_table_start = write_inodes(); 5090 sBlk->directory_table_start = write_directories(); 5091 sBlk->fragment_table_start = write_fragment_table(); 5092 sBlk->lookup_table_start = exportable ? write_inode_lookup_table() : 5093 SQUASHFS_INVALID_BLK; 5094 sBlk->id_table_start = write_id_table(); 5095 sBlk->xattr_id_table_start = write_xattrs(); 5096 5097 TRACE("sBlk->inode_table_start 0x%llx\n", sBlk->inode_table_start); 5098 TRACE("sBlk->directory_table_start 0x%llx\n", 5099 sBlk->directory_table_start); 5100 TRACE("sBlk->fragment_table_start 0x%llx\n", sBlk->fragment_table_start); 5101 if(exportable) 5102 TRACE("sBlk->lookup_table_start 0x%llx\n", 5103 sBlk->lookup_table_start); 5104 5105 sBlk->bytes_used = bytes; 5106 5107 sBlk->compression = comp->id; 5108 5109 SQUASHFS_INSWAP_SUPER_BLOCK(sBlk); 5110 write_destination(fd, SQUASHFS_START, sizeof(*sBlk), sBlk); 5111 5112 if(!nopad && (i = bytes & (4096 - 1))) { 5113 char temp[4096] = {0}; 5114 write_destination(fd, bytes, 4096 - i, temp); 5115 } 5116 5117 close(fd); 5118 5119 if(recovery_file) 5120 unlink(recovery_file); 5121 5122 total_bytes += total_inode_bytes + total_directory_bytes + 5123 sizeof(struct squashfs_super_block) + total_xattr_bytes; 5124 5125 printf("\n%sSquashfs %d.%d filesystem, %s compressed, data block size" 5126 " %d\n", exportable ? "Exportable " : "", SQUASHFS_MAJOR, 5127 SQUASHFS_MINOR, comp->name, block_size); 5128 printf("\t%s data, %s metadata, %s fragments, %s xattrs\n", 5129 noD ? "uncompressed" : "compressed", noI ? "uncompressed" : 5130 "compressed", no_fragments ? "no" : noF ? "uncompressed" : 5131 "compressed", no_xattrs ? "no" : noX ? "uncompressed" : 5132 "compressed"); 5133 printf("\tduplicates are %sremoved\n", duplicate_checking ? "" : 5134 "not "); 5135 printf("Filesystem size %.2f Kbytes (%.2f Mbytes)\n", bytes / 1024.0, 5136 bytes / (1024.0 * 1024.0)); 5137 printf("\t%.2f%% of uncompressed filesystem size (%.2f Kbytes)\n", 5138 ((float) bytes / total_bytes) * 100.0, total_bytes / 1024.0); 5139 printf("Inode table size %d bytes (%.2f Kbytes)\n", 5140 inode_bytes, inode_bytes / 1024.0); 5141 printf("\t%.2f%% of uncompressed inode table size (%d bytes)\n", 5142 ((float) inode_bytes / total_inode_bytes) * 100.0, 5143 total_inode_bytes); 5144 printf("Directory table size %d bytes (%.2f Kbytes)\n", 5145 directory_bytes, directory_bytes / 1024.0); 5146 printf("\t%.2f%% of uncompressed directory table size (%d bytes)\n", 5147 ((float) directory_bytes / total_directory_bytes) * 100.0, 5148 total_directory_bytes); 5149 if(total_xattr_bytes) { 5150 printf("Xattr table size %d bytes (%.2f Kbytes)\n", 5151 xattr_bytes, xattr_bytes / 1024.0); 5152 printf("\t%.2f%% of uncompressed xattr table size (%d bytes)\n", 5153 ((float) xattr_bytes / total_xattr_bytes) * 100.0, 5154 total_xattr_bytes); 5155 } 5156 if(duplicate_checking) 5157 printf("Number of duplicate files found %d\n", file_count - 5158 dup_files); 5159 else 5160 printf("No duplicate files removed\n"); 5161 printf("Number of inodes %d\n", inode_count); 5162 printf("Number of files %d\n", file_count); 5163 if(!no_fragments) 5164 printf("Number of fragments %d\n", fragments); 5165 printf("Number of symbolic links %d\n", sym_count); 5166 printf("Number of device nodes %d\n", dev_count); 5167 printf("Number of fifo nodes %d\n", fifo_count); 5168 printf("Number of socket nodes %d\n", sock_count); 5169 printf("Number of directories %d\n", dir_count); 5170 printf("Number of ids (unique uids + gids) %d\n", id_count); 5171 printf("Number of uids %d\n", uid_count); 5172 5173 for(i = 0; i < id_count; i++) { 5174 if(id_table[i]->flags & ISA_UID) { 5175 struct passwd *user = getpwuid(id_table[i]->id); 5176 printf("\t%s (%d)\n", user == NULL ? "unknown" : 5177 user->pw_name, id_table[i]->id); 5178 } 5179 } 5180 5181 printf("Number of gids %d\n", guid_count); 5182 5183 for(i = 0; i < id_count; i++) { 5184 if(id_table[i]->flags & ISA_GID) { 5185 struct group *group = getgrgid(id_table[i]->id); 5186 printf("\t%s (%d)\n", group == NULL ? "unknown" : 5187 group->gr_name, id_table[i]->id); 5188 } 5189 } 5190 5191 printf("Number of whitelisted (uncompressed) files %d\n", 5192 whitelisted_count); 5193 } 5194 5195 5196 int parse_numberll(char *start, long long *res, int size) 5197 { 5198 char *end; 5199 long long number; 5200 5201 errno = 0; /* To distinguish success/failure after call */ 5202 5203 number = strtoll(start, &end, 10); 5204 5205 /* 5206 * check for strtoll underflow or overflow in conversion, and other 5207 * errors. 5208 */ 5209 if((errno == ERANGE && (number == LLONG_MIN || number == LLONG_MAX)) || 5210 (errno != 0 && number == 0)) 5211 return 0; 5212 5213 /* reject negative numbers as invalid */ 5214 if(number < 0) 5215 return 0; 5216 5217 if(size) { 5218 /* 5219 * Check for multiplier and trailing junk. 5220 * But first check that a number exists before the 5221 * multiplier 5222 */ 5223 if(end == start) 5224 return 0; 5225 5226 switch(end[0]) { 5227 case 'g': 5228 case 'G': 5229 if(multiply_overflowll(number, 1073741824)) 5230 return 0; 5231 number *= 1073741824; 5232 5233 if(end[1] != '\0') 5234 /* trailing junk after multiplier, but 5235 * allow it to be "bytes" */ 5236 if(strcmp(end + 1, "bytes")) 5237 return 0; 5238 5239 break; 5240 case 'm': 5241 case 'M': 5242 if(multiply_overflowll(number, 1048576)) 5243 return 0; 5244 number *= 1048576; 5245 5246 if(end[1] != '\0') 5247 /* trailing junk after multiplier, but 5248 * allow it to be "bytes" */ 5249 if(strcmp(end + 1, "bytes")) 5250 return 0; 5251 5252 break; 5253 case 'k': 5254 case 'K': 5255 if(multiply_overflowll(number, 1024)) 5256 return 0; 5257 number *= 1024; 5258 5259 if(end[1] != '\0') 5260 /* trailing junk after multiplier, but 5261 * allow it to be "bytes" */ 5262 if(strcmp(end + 1, "bytes")) 5263 return 0; 5264 5265 break; 5266 case '\0': 5267 break; 5268 default: 5269 /* trailing junk after number */ 5270 return 0; 5271 } 5272 } else if(end[0] != '\0') 5273 /* trailing junk after number */ 5274 return 0; 5275 5276 *res = number; 5277 return 1; 5278 } 5279 5280 5281 int parse_number(char *start, int *res, int size) 5282 { 5283 long long number; 5284 5285 if(!parse_numberll(start, &number, size)) 5286 return 0; 5287 5288 /* check if long result will overflow signed int */ 5289 if(number > INT_MAX) 5290 return 0; 5291 5292 *res = (int) number; 5293 return 1; 5294 } 5295 5296 5297 int parse_num(char *arg, int *res) 5298 { 5299 return parse_number(arg, res, 0); 5300 } 5301 5302 5303 int get_physical_memory() 5304 { 5305 int phys_mem; 5306 #ifndef linux 5307 #ifdef HW_MEMSIZE 5308 #define SYSCTL_PHYSMEM HW_MEMSIZE 5309 #elif defined(HW_PHYSMEM64) 5310 #define SYSCTL_PHYSMEM HW_PHYSMEM64 5311 #else 5312 #define SYSCTL_PHYSMEM HW_PHYSMEM 5313 #endif 5314 5315 int mib[2]; 5316 uint64_t sysctl_physmem = 0; 5317 size_t sysctl_len = sizeof(sysctl_physmem); 5318 5319 mib[0] = CTL_HW; 5320 mib[1] = SYSCTL_PHYSMEM; 5321 5322 if(sysctl(mib, 2, &sysctl_physmem, &sysctl_len, NULL, 0) == 0) { 5323 /* some systems use 32-bit values, work with what we're given */ 5324 if (sysctl_len == 4) 5325 sysctl_physmem = *(uint32_t*)&sysctl_physmem; 5326 phys_mem = sysctl_physmem >> 20; 5327 } else { 5328 ERROR_START("Failed to get amount of available " 5329 "memory."); 5330 ERROR_EXIT(" Defaulting to least viable amount\n"); 5331 phys_mem = SQUASHFS_LOWMEM; 5332 } 5333 #undef SYSCTL_PHYSMEM 5334 #else 5335 /* Long longs are used here because with PAE, a 32-bit 5336 machine can have more than 4GB of physical memory */ 5337 5338 long long num_pages = sysconf(_SC_PHYS_PAGES); 5339 long long page_size = sysconf(_SC_PAGESIZE); 5340 phys_mem = num_pages * page_size >> 20; 5341 if(num_pages == -1 || page_size == -1) 5342 return 0; 5343 5344 #endif 5345 5346 if(phys_mem < SQUASHFS_LOWMEM) 5347 BAD_ERROR("Mksquashfs requires more physical memory than is " 5348 "available!\n"); 5349 5350 return phys_mem; 5351 } 5352 5353 5354 void check_usable_phys_mem(int total_mem) 5355 { 5356 /* 5357 * We want to allow users to use as much of their physical 5358 * memory as they wish. However, for practical reasons there are 5359 * limits which need to be imposed, to protect users from themselves 5360 * and to prevent people from using Mksquashfs as a DOS attack by using 5361 * all physical memory. Mksquashfs uses memory to cache data from disk 5362 * to optimise performance. It is pointless to ask it to use more 5363 * than 75% of physical memory, as this causes thrashing and it is thus 5364 * self-defeating. 5365 */ 5366 int mem = get_physical_memory(); 5367 5368 mem = (mem >> 1) + (mem >> 2); /* 75% */ 5369 5370 if(total_mem > mem && mem) { 5371 ERROR("Total memory requested is more than 75%% of physical " 5372 "memory.\n"); 5373 ERROR("Mksquashfs uses memory to cache data from disk to " 5374 "optimise performance.\n"); 5375 ERROR("It is pointless to ask it to use more than this amount " 5376 "of memory, as this\n"); 5377 ERROR("causes thrashing and it is thus self-defeating.\n"); 5378 BAD_ERROR("Requested memory size too large\n"); 5379 } 5380 5381 if(sizeof(void *) == 4 && total_mem > 2048) { 5382 /* 5383 * If we're running on a kernel with PAE or on a 64-bit kernel, 5384 * then the 75% physical memory limit can still easily exceed 5385 * the addressable memory by this process. 5386 * 5387 * Due to the typical kernel/user-space split (1GB/3GB, or 5388 * 2GB/2GB), we have to conservatively assume the 32-bit 5389 * processes can only address 2-3GB. So refuse if the user 5390 * tries to allocate more than 2GB. 5391 */ 5392 ERROR("Total memory requested may exceed maximum " 5393 "addressable memory by this process\n"); 5394 BAD_ERROR("Requested memory size too large\n"); 5395 } 5396 } 5397 5398 5399 int get_default_phys_mem() 5400 { 5401 /* 5402 * get_physical_memory() relies on /proc being mounted. 5403 * If it fails, issue a warning, and use 5404 * SQUASHFS_LOWMEM / SQUASHFS_TAKE as default, 5405 * and allow a larger value to be set with -mem. 5406 */ 5407 int mem = get_physical_memory(); 5408 5409 if(mem == 0) { 5410 mem = SQUASHFS_LOWMEM / SQUASHFS_TAKE; 5411 5412 ERROR("Warning: Cannot get size of physical memory, probably " 5413 "because /proc is missing.\n"); 5414 ERROR("Warning: Defaulting to minimal use of %d Mbytes, use " 5415 "-mem to set a better value,\n", mem); 5416 ERROR("Warning: or fix /proc.\n"); 5417 } else 5418 mem /= SQUASHFS_TAKE; 5419 5420 if(sizeof(void *) == 4 && mem > 640) { 5421 /* 5422 * If we're running on a kernel with PAE or on a 64-bit kernel, 5423 * the default memory usage can exceed the addressable 5424 * memory by this process. 5425 * Due to the typical kernel/user-space split (1GB/3GB, or 5426 * 2GB/2GB), we have to conservatively assume the 32-bit 5427 * processes can only address 2-3GB. So limit the default 5428 * usage to 640M, which gives room for other data. 5429 */ 5430 mem = 640; 5431 } 5432 5433 return mem; 5434 } 5435 5436 5437 void calculate_queue_sizes(int mem, int *readq, int *fragq, int *bwriteq, 5438 int *fwriteq) 5439 { 5440 *readq = mem / SQUASHFS_READQ_MEM; 5441 *bwriteq = mem / SQUASHFS_BWRITEQ_MEM; 5442 *fwriteq = mem / SQUASHFS_FWRITEQ_MEM; 5443 *fragq = mem - *readq - *bwriteq - *fwriteq; 5444 } 5445 5446 5447 #define VERSION() \ 5448 printf("mksquashfs version 4.3-git (2014/09/12)\n");\ 5449 printf("copyright (C) 2014 Phillip Lougher "\ 5450 "<phillip (at) squashfs.org.uk>\n\n"); \ 5451 printf("This program is free software; you can redistribute it and/or"\ 5452 "\n");\ 5453 printf("modify it under the terms of the GNU General Public License"\ 5454 "\n");\ 5455 printf("as published by the Free Software Foundation; either version "\ 5456 "2,\n");\ 5457 printf("or (at your option) any later version.\n\n");\ 5458 printf("This program is distributed in the hope that it will be "\ 5459 "useful,\n");\ 5460 printf("but WITHOUT ANY WARRANTY; without even the implied warranty "\ 5461 "of\n");\ 5462 printf("MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the"\ 5463 "\n");\ 5464 printf("GNU General Public License for more details.\n"); 5465 int main(int argc, char *argv[]) 5466 { 5467 struct stat buf, source_buf; 5468 int res, i; 5469 char *b, *root_name = NULL; 5470 int keep_as_directory = FALSE; 5471 squashfs_inode inode; 5472 int readq; 5473 int fragq; 5474 int bwriteq; 5475 int fwriteq; 5476 int total_mem = get_default_phys_mem(); 5477 int progress = TRUE; 5478 int force_progress = FALSE; 5479 struct file_buffer **fragment = NULL; 5480 /* ANDROID CHANGES START*/ 5481 #ifdef ANDROID 5482 const char *fs_config_file = NULL; 5483 #endif 5484 /* ANDROID CHANGES END */ 5485 5486 if(argc > 1 && strcmp(argv[1], "-version") == 0) { 5487 VERSION(); 5488 exit(0); 5489 } 5490 5491 block_log = slog(block_size); 5492 calculate_queue_sizes(total_mem, &readq, &fragq, &bwriteq, &fwriteq); 5493 5494 for(i = 1; i < argc && argv[i][0] != '-'; i++); 5495 if(i < 3) 5496 goto printOptions; 5497 source_path = argv + 1; 5498 source = i - 2; 5499 5500 /* 5501 * Scan the command line for -comp xxx option, this is to ensure 5502 * any -X compressor specific options are passed to the 5503 * correct compressor 5504 */ 5505 for(; i < argc; i++) { 5506 struct compressor *prev_comp = comp; 5507 5508 if(strcmp(argv[i], "-comp") == 0) { 5509 if(++i == argc) { 5510 ERROR("%s: -comp missing compression type\n", 5511 argv[0]); 5512 exit(1); 5513 } 5514 comp = lookup_compressor(argv[i]); 5515 if(!comp->supported) { 5516 ERROR("%s: Compressor \"%s\" is not supported!" 5517 "\n", argv[0], argv[i]); 5518 ERROR("%s: Compressors available:\n", argv[0]); 5519 display_compressors("", COMP_DEFAULT); 5520 exit(1); 5521 } 5522 if(prev_comp != NULL && prev_comp != comp) { 5523 ERROR("%s: -comp multiple conflicting -comp" 5524 " options specified on command line" 5525 ", previously %s, now %s\n", argv[0], 5526 prev_comp->name, comp->name); 5527 exit(1); 5528 } 5529 compressor_opt_parsed = 1; 5530 5531 } else if(strcmp(argv[i], "-e") == 0) 5532 break; 5533 else if(strcmp(argv[i], "-root-becomes") == 0 || 5534 strcmp(argv[i], "-ef") == 0 || 5535 strcmp(argv[i], "-pf") == 0 || 5536 strcmp(argv[i], "-vaf") == 0 || 5537 strcmp(argv[i], "-comp") == 0) 5538 i++; 5539 } 5540 5541 /* 5542 * if no -comp option specified lookup default compressor. Note the 5543 * Makefile ensures the default compressor has been built, and so we 5544 * don't need to to check for failure here 5545 */ 5546 if(comp == NULL) 5547 comp = lookup_compressor(COMP_DEFAULT); 5548 5549 for(i = source + 2; i < argc; i++) { 5550 if(strcmp(argv[i], "-action") == 0 || 5551 strcmp(argv[i], "-a") ==0) { 5552 if(++i == argc) { 5553 ERROR("%s: %s missing action\n", 5554 argv[0], argv[i - 1]); 5555 exit(1); 5556 } 5557 res = parse_action(argv[i], ACTION_LOG_NONE); 5558 if(res == 0) 5559 exit(1); 5560 5561 } else if(strcmp(argv[i], "-verbose-action") == 0 || 5562 strcmp(argv[i], "-va") ==0) { 5563 if(++i == argc) { 5564 ERROR("%s: %s missing action\n", 5565 argv[0], argv[i - 1]); 5566 exit(1); 5567 } 5568 res = parse_action(argv[i], ACTION_LOG_VERBOSE); 5569 if(res == 0) 5570 exit(1); 5571 5572 } else if(strcmp(argv[i], "-true-action") == 0 || 5573 strcmp(argv[i], "-ta") ==0) { 5574 if(++i == argc) { 5575 ERROR("%s: %s missing action\n", 5576 argv[0], argv[i - 1]); 5577 exit(1); 5578 } 5579 res = parse_action(argv[i], ACTION_LOG_TRUE); 5580 if(res == 0) 5581 exit(1); 5582 5583 } else if(strcmp(argv[i], "-false-action") == 0 || 5584 strcmp(argv[i], "-fa") ==0) { 5585 if(++i == argc) { 5586 ERROR("%s: %s missing action\n", 5587 argv[0], argv[i - 1]); 5588 exit(1); 5589 } 5590 res = parse_action(argv[i], ACTION_LOG_FALSE); 5591 if(res == 0) 5592 exit(1); 5593 5594 } else if(strcmp(argv[i], "-action-file") == 0 || 5595 strcmp(argv[i], "-af") ==0) { 5596 if(++i == argc) { 5597 ERROR("%s: %s missing filename\n", argv[0], 5598 argv[i - 1]); 5599 exit(1); 5600 } 5601 if(read_action_file(argv[i], ACTION_LOG_NONE) == FALSE) 5602 exit(1); 5603 5604 } else if(strcmp(argv[i], "-verbose-action-file") == 0 || 5605 strcmp(argv[i], "-vaf") ==0) { 5606 if(++i == argc) { 5607 ERROR("%s: %s missing filename\n", argv[0], 5608 argv[i - 1]); 5609 exit(1); 5610 } 5611 if(read_action_file(argv[i], ACTION_LOG_VERBOSE) == FALSE) 5612 exit(1); 5613 5614 } else if(strcmp(argv[i], "-true-action-file") == 0 || 5615 strcmp(argv[i], "-taf") ==0) { 5616 if(++i == argc) { 5617 ERROR("%s: %s missing filename\n", argv[0], 5618 argv[i - 1]); 5619 exit(1); 5620 } 5621 if(read_action_file(argv[i], ACTION_LOG_TRUE) == FALSE) 5622 exit(1); 5623 5624 } else if(strcmp(argv[i], "-false-action-file") == 0 || 5625 strcmp(argv[i], "-faf") ==0) { 5626 if(++i == argc) { 5627 ERROR("%s: %s missing filename\n", argv[0], 5628 argv[i - 1]); 5629 exit(1); 5630 } 5631 if(read_action_file(argv[i], ACTION_LOG_FALSE) == FALSE) 5632 exit(1); 5633 5634 } else if(strcmp(argv[i], "-comp") == 0) 5635 /* parsed previously */ 5636 i++; 5637 5638 else if(strncmp(argv[i], "-X", 2) == 0) { 5639 int args; 5640 5641 if(strcmp(argv[i] + 2, "help") == 0) 5642 goto print_compressor_options; 5643 5644 args = compressor_options(comp, argv + i, argc - i); 5645 if(args < 0) { 5646 if(args == -1) { 5647 ERROR("%s: Unrecognised compressor" 5648 " option %s\n", argv[0], 5649 argv[i]); 5650 if(!compressor_opt_parsed) 5651 ERROR("%s: Did you forget to" 5652 " specify -comp?\n", 5653 argv[0]); 5654 print_compressor_options: 5655 ERROR("%s: selected compressor \"%s\"" 5656 ". Options supported: %s\n", 5657 argv[0], comp->name, 5658 comp->usage ? "" : "none"); 5659 if(comp->usage) 5660 comp->usage(); 5661 } 5662 exit(1); 5663 } 5664 i += args; 5665 5666 } else if(strcmp(argv[i], "-pf") == 0) { 5667 if(++i == argc) { 5668 ERROR("%s: -pf missing filename\n", argv[0]); 5669 exit(1); 5670 } 5671 if(read_pseudo_file(argv[i]) == FALSE) 5672 exit(1); 5673 } else if(strcmp(argv[i], "-p") == 0) { 5674 if(++i == argc) { 5675 ERROR("%s: -p missing pseudo file definition\n", 5676 argv[0]); 5677 exit(1); 5678 } 5679 if(read_pseudo_def(argv[i]) == FALSE) 5680 exit(1); 5681 } else if(strcmp(argv[i], "-recover") == 0) { 5682 if(++i == argc) { 5683 ERROR("%s: -recover missing recovery file\n", 5684 argv[0]); 5685 exit(1); 5686 } 5687 read_recovery_data(argv[i], argv[source + 1]); 5688 } else if(strcmp(argv[i], "-no-recovery") == 0) 5689 recover = FALSE; 5690 else if(strcmp(argv[i], "-wildcards") == 0) { 5691 old_exclude = FALSE; 5692 use_regex = FALSE; 5693 } else if(strcmp(argv[i], "-regex") == 0) { 5694 old_exclude = FALSE; 5695 use_regex = TRUE; 5696 } else if(strcmp(argv[i], "-no-sparse") == 0) 5697 sparse_files = FALSE; 5698 else if(strcmp(argv[i], "-no-progress") == 0) 5699 progress = FALSE; 5700 else if(strcmp(argv[i], "-progress") == 0) 5701 force_progress = TRUE; 5702 else if(strcmp(argv[i], "-no-exports") == 0) 5703 exportable = FALSE; 5704 else if(strcmp(argv[i], "-processors") == 0) { 5705 if((++i == argc) || !parse_num(argv[i], &processors)) { 5706 ERROR("%s: -processors missing or invalid " 5707 "processor number\n", argv[0]); 5708 exit(1); 5709 } 5710 if(processors < 1) { 5711 ERROR("%s: -processors should be 1 or larger\n", 5712 argv[0]); 5713 exit(1); 5714 } 5715 } else if(strcmp(argv[i], "-read-queue") == 0) { 5716 if((++i == argc) || !parse_num(argv[i], &readq)) { 5717 ERROR("%s: -read-queue missing or invalid " 5718 "queue size\n", argv[0]); 5719 exit(1); 5720 } 5721 if(readq < 1) { 5722 ERROR("%s: -read-queue should be 1 megabyte or " 5723 "larger\n", argv[0]); 5724 exit(1); 5725 } 5726 } else if(strcmp(argv[i], "-write-queue") == 0) { 5727 if((++i == argc) || !parse_num(argv[i], &bwriteq)) { 5728 ERROR("%s: -write-queue missing or invalid " 5729 "queue size\n", argv[0]); 5730 exit(1); 5731 } 5732 if(bwriteq < 2) { 5733 ERROR("%s: -write-queue should be 2 megabytes " 5734 "or larger\n", argv[0]); 5735 exit(1); 5736 } 5737 fwriteq = bwriteq >> 1; 5738 bwriteq -= fwriteq; 5739 } else if(strcmp(argv[i], "-fragment-queue") == 0) { 5740 if((++i == argc) || !parse_num(argv[i], &fragq)) { 5741 ERROR("%s: -fragment-queue missing or invalid " 5742 "queue size\n", argv[0]); 5743 exit(1); 5744 } 5745 if(fragq < 1) { 5746 ERROR("%s: -fragment-queue should be 1 " 5747 "megabyte or larger\n", argv[0]); 5748 exit(1); 5749 } 5750 } else if(strcmp(argv[i], "-mem") == 0) { 5751 long long number; 5752 5753 if((++i == argc) || 5754 !parse_numberll(argv[i], &number, 1)) { 5755 ERROR("%s: -mem missing or invalid mem size\n", 5756 argv[0]); 5757 exit(1); 5758 } 5759 5760 /* 5761 * convert from bytes to Mbytes, ensuring the value 5762 * does not overflow a signed int 5763 */ 5764 if(number >= (1LL << 51)) { 5765 ERROR("%s: -mem invalid mem size\n", argv[0]); 5766 exit(1); 5767 } 5768 5769 total_mem = number / 1048576; 5770 if(total_mem < (SQUASHFS_LOWMEM / SQUASHFS_TAKE)) { 5771 ERROR("%s: -mem should be %d Mbytes or " 5772 "larger\n", argv[0], 5773 SQUASHFS_LOWMEM / SQUASHFS_TAKE); 5774 exit(1); 5775 } 5776 calculate_queue_sizes(total_mem, &readq, &fragq, 5777 &bwriteq, &fwriteq); 5778 } else if(strcmp(argv[i], "-b") == 0) { 5779 if(++i == argc) { 5780 ERROR("%s: -b missing block size\n", argv[0]); 5781 exit(1); 5782 } 5783 if(!parse_number(argv[i], &block_size, 1)) { 5784 ERROR("%s: -b invalid block size\n", argv[0]); 5785 exit(1); 5786 } 5787 if((block_log = slog(block_size)) == 0) { 5788 ERROR("%s: -b block size not power of two or " 5789 "not between 4096 and 1Mbyte\n", 5790 argv[0]); 5791 exit(1); 5792 } 5793 } else if(strcmp(argv[i], "-ef") == 0) { 5794 if(++i == argc) { 5795 ERROR("%s: -ef missing filename\n", argv[0]); 5796 exit(1); 5797 } 5798 } else if(strcmp(argv[i], "-no-duplicates") == 0) 5799 duplicate_checking = FALSE; 5800 5801 else if(strcmp(argv[i], "-no-fragments") == 0) 5802 no_fragments = TRUE; 5803 5804 else if(strcmp(argv[i], "-always-use-fragments") == 0) 5805 always_use_fragments = TRUE; 5806 5807 else if(strcmp(argv[i], "-sort") == 0) { 5808 if(++i == argc) { 5809 ERROR("%s: -sort missing filename\n", argv[0]); 5810 exit(1); 5811 } 5812 } else if(strcmp(argv[i], "-all-root") == 0 || 5813 strcmp(argv[i], "-root-owned") == 0) 5814 global_uid = global_gid = 0; 5815 5816 else if(strcmp(argv[i], "-force-uid") == 0) { 5817 if(++i == argc) { 5818 ERROR("%s: -force-uid missing uid or user\n", 5819 argv[0]); 5820 exit(1); 5821 } 5822 if((global_uid = strtoll(argv[i], &b, 10)), *b =='\0') { 5823 if(global_uid < 0 || global_uid > 5824 (((long long) 1 << 32) - 1)) { 5825 ERROR("%s: -force-uid uid out of range" 5826 "\n", argv[0]); 5827 exit(1); 5828 } 5829 } else { 5830 struct passwd *uid = getpwnam(argv[i]); 5831 if(uid) 5832 global_uid = uid->pw_uid; 5833 else { 5834 ERROR("%s: -force-uid invalid uid or " 5835 "unknown user\n", argv[0]); 5836 exit(1); 5837 } 5838 } 5839 } else if(strcmp(argv[i], "-force-gid") == 0) { 5840 if(++i == argc) { 5841 ERROR("%s: -force-gid missing gid or group\n", 5842 argv[0]); 5843 exit(1); 5844 } 5845 if((global_gid = strtoll(argv[i], &b, 10)), *b =='\0') { 5846 if(global_gid < 0 || global_gid > 5847 (((long long) 1 << 32) - 1)) { 5848 ERROR("%s: -force-gid gid out of range" 5849 "\n", argv[0]); 5850 exit(1); 5851 } 5852 } else { 5853 struct group *gid = getgrnam(argv[i]); 5854 if(gid) 5855 global_gid = gid->gr_gid; 5856 else { 5857 ERROR("%s: -force-gid invalid gid or " 5858 "unknown group\n", argv[0]); 5859 exit(1); 5860 } 5861 } 5862 } else if(strcmp(argv[i], "-noI") == 0 || 5863 strcmp(argv[i], "-noInodeCompression") == 0) 5864 noI = TRUE; 5865 5866 else if(strcmp(argv[i], "-noD") == 0 || 5867 strcmp(argv[i], "-noDataCompression") == 0) 5868 noD = TRUE; 5869 5870 else if(strcmp(argv[i], "-noF") == 0 || 5871 strcmp(argv[i], "-noFragmentCompression") == 0) 5872 noF = TRUE; 5873 5874 else if(strcmp(argv[i], "-noX") == 0 || 5875 strcmp(argv[i], "-noXattrCompression") == 0) 5876 noX = TRUE; 5877 5878 else if(strcmp(argv[i], "-no-xattrs") == 0) 5879 no_xattrs = TRUE; 5880 5881 else if(strcmp(argv[i], "-xattrs") == 0) 5882 no_xattrs = FALSE; 5883 5884 /* ANDROID CHANGES START*/ 5885 #ifdef ANDROID 5886 else if(strcmp(argv[i], "-context-file") == 0) { 5887 if(++i == argc) { 5888 ERROR("%s: -context-file: missing file name\n", 5889 argv[0]); 5890 exit(1); 5891 } 5892 context_file = argv[i]; 5893 } 5894 else if(strcmp(argv[i], "-fs-config-file") == 0) { 5895 if(++i == argc) { 5896 ERROR("%s: -fs-config-file: missing file name\n", 5897 argv[0]); 5898 exit(1); 5899 } 5900 fs_config_file = argv[i]; 5901 } else if(strcmp(argv[i], "-whitelist") == 0) { 5902 if(++i == argc) { 5903 ERROR("%s: -whitelist missing filename\n", argv[0]); 5904 exit(1); 5905 } 5906 whitelist_filename = argv[i]; 5907 } 5908 else if(strcmp(argv[i], "-t") == 0) { 5909 if(++i == argc) { 5910 ERROR("%s: -t missing compression threshold percentage\n", argv[0]); 5911 exit(1); 5912 } 5913 if(!parse_number(argv[i], &compress_thresh_per, 1)) { 5914 ERROR("%s: -t invalid compression threshold percentage\n", argv[0]); 5915 exit(1); 5916 } 5917 if(compress_thresh_per > 100 || compress_thresh_per < 0) { 5918 ERROR("%s: -t compression threshold percentage not between 0 and 100\n", 5919 argv[0]); 5920 exit(1); 5921 } 5922 } 5923 #endif 5924 /* ANDROID CHANGES END */ 5925 else if(strcmp(argv[i], "-nopad") == 0) 5926 nopad = TRUE; 5927 5928 else if(strcmp(argv[i], "-info") == 0) 5929 silent = FALSE; 5930 5931 else if(strcmp(argv[i], "-e") == 0) 5932 break; 5933 5934 else if(strcmp(argv[i], "-noappend") == 0) 5935 delete = TRUE; 5936 5937 else if(strcmp(argv[i], "-keep-as-directory") == 0) 5938 keep_as_directory = TRUE; 5939 /* ANDROID CHANGES START*/ 5940 #ifdef ANDROID 5941 else if(strcmp(argv[i], "-android-fs-config") == 0) 5942 android_config = TRUE; 5943 else if(strcmp(argv[i], "-mount-point") == 0) { 5944 if(++i == argc) { 5945 ERROR("%s: -mount-point: missing mount point name\n", 5946 argv[0]); 5947 exit(1); 5948 } 5949 mount_point = argv[i]; 5950 } 5951 else if(strcmp(argv[i], "-product-out") == 0) { 5952 if(++i == argc) { 5953 ERROR("%s: -product-out: missing path name\n", 5954 argv[0]); 5955 exit(1); 5956 } 5957 target_out_path = argv[i]; 5958 } 5959 else if(strcmp(argv[i], "-disable-4k-align") == 0) 5960 align_4k_blocks = FALSE; 5961 else if(strcmp(argv[i], "-block-map") == 0) { 5962 if(++i == argc) { 5963 ERROR("%s: -block-map: missing path name\n", 5964 argv[0]); 5965 exit(1); 5966 } 5967 block_map_file = fopen(argv[i], "w"); 5968 if (block_map_file == NULL) { 5969 ERROR("%s: -block-map: failed to open %s\n", 5970 argv[0], argv[i]); 5971 exit(1); 5972 } 5973 if (!align_4k_blocks) { 5974 ERROR("WARNING: Using block maps with unaligned 4k blocks " 5975 "is not ideal as block map offsets are multiples of 4k, " 5976 "consider not passing -disable-4k-align\n"); 5977 } 5978 } 5979 #endif 5980 /* ANDROID CHANGES END */ 5981 5982 else if(strcmp(argv[i], "-exit-on-error") == 0) 5983 exit_on_error = TRUE; 5984 5985 else if(strcmp(argv[i], "-root-becomes") == 0) { 5986 if(++i == argc) { 5987 ERROR("%s: -root-becomes: missing name\n", 5988 argv[0]); 5989 exit(1); 5990 } 5991 root_name = argv[i]; 5992 } else if(strcmp(argv[i], "-version") == 0) { 5993 VERSION(); 5994 } else { 5995 ERROR("%s: invalid option\n\n", argv[0]); 5996 printOptions: 5997 ERROR("SYNTAX:%s source1 source2 ... dest [options] " 5998 "[-e list of exclude\ndirs/files]\n", argv[0]); 5999 ERROR("\nFilesystem build options:\n"); 6000 ERROR("-comp <comp>\t\tselect <comp> compression\n"); 6001 ERROR("\t\t\tCompressors available:\n"); 6002 display_compressors("\t\t\t", COMP_DEFAULT); 6003 ERROR("-b <block_size>\t\tset data block to " 6004 "<block_size>. Default 128 Kbytes\n"); 6005 ERROR("\t\t\tOptionally a suffix of K or M can be" 6006 " given to specify\n\t\t\tKbytes or Mbytes" 6007 " respectively\n"); 6008 ERROR("-no-exports\t\tdon't make the filesystem " 6009 "exportable via NFS\n"); 6010 ERROR("-no-sparse\t\tdon't detect sparse files\n"); 6011 ERROR("-no-xattrs\t\tdon't store extended attributes" 6012 NOXOPT_STR "\n"); 6013 ERROR("-xattrs\t\t\tstore extended attributes" XOPT_STR 6014 "\n"); 6015 /* ANDROID CHANGES START*/ 6016 #ifdef ANDROID 6017 ERROR("-context-file <file>\tApply selinux security " 6018 "xattrs from context-file instead\n\t\t\t" 6019 "of reading xattrs from file system\n"); 6020 ERROR("-fs-config-file <file>\tAndroid specific " 6021 "filesystem config file\n"); 6022 ERROR("-t <compress_thresh>\tset minimum " 6023 "acceptable compression ratio of a block to\n\t\t\t" 6024 "<compress_thresh_per> otherwise don't compress. " 6025 "Default 0%\n"); 6026 ERROR("-whitelist <file>\tAndroid specific whitelist " 6027 "one entry per line (no wildcards)\n"); 6028 #endif 6029 /* ANDROID CHANGES END */ 6030 ERROR("-noI\t\t\tdo not compress inode table\n"); 6031 ERROR("-noD\t\t\tdo not compress data blocks\n"); 6032 ERROR("-noF\t\t\tdo not compress fragment blocks\n"); 6033 ERROR("-noX\t\t\tdo not compress extended " 6034 "attributes\n"); 6035 ERROR("-no-fragments\t\tdo not use fragments\n"); 6036 ERROR("-always-use-fragments\tuse fragment blocks for " 6037 "files larger than block size\n"); 6038 ERROR("-no-duplicates\t\tdo not perform duplicate " 6039 "checking\n"); 6040 ERROR("-all-root\t\tmake all files owned by root\n"); 6041 ERROR("-force-uid uid\t\tset all file uids to uid\n"); 6042 ERROR("-force-gid gid\t\tset all file gids to gid\n"); 6043 ERROR("-nopad\t\t\tdo not pad filesystem to a multiple " 6044 "of 4K\n"); 6045 ERROR("-keep-as-directory\tif one source directory is " 6046 "specified, create a root\n"); 6047 ERROR("\t\t\tdirectory containing that directory, " 6048 "rather than the\n"); 6049 ERROR("\t\t\tcontents of the directory\n"); 6050 /* ANDROID CHANGES START*/ 6051 #ifdef ANDROID 6052 ERROR("-android-fs-config\tuse android fs config " 6053 "for mode, uid, and gids of inodes\n"); 6054 ERROR("-mount-point <name>\tNeed to be provided when " 6055 "android-fs-config or context-file\n\t\t\tare " 6056 "enabled and source directory is not mount point\n"); 6057 ERROR("-product-out <path>\tPRODUCT_OUT directory to " 6058 "read device specific FS rules files from\n"); 6059 ERROR("-disable-4k-align \tDon't 4k align data blocks. Default is false\n"); 6060 ERROR("-block-map <path>\tGenerate a block map for non-fragment files\n"); 6061 #endif 6062 /* ANDROID CHANGES END */ 6063 ERROR("\nFilesystem filter options:\n"); 6064 ERROR("-p <pseudo-definition>\tAdd pseudo file " 6065 "definition\n"); 6066 ERROR("-pf <pseudo-file>\tAdd list of pseudo file " 6067 "definitions\n"); 6068 ERROR("-sort <sort_file>\tsort files according to " 6069 "priorities in <sort_file>. One\n"); 6070 ERROR("\t\t\tfile or dir with priority per line. " 6071 "Priority -32768 to\n"); 6072 ERROR("\t\t\t32767, default priority 0\n"); 6073 ERROR("-ef <exclude_file>\tlist of exclude dirs/files." 6074 " One per line\n"); 6075 ERROR("-wildcards\t\tAllow extended shell wildcards " 6076 "(globbing) to be used in\n\t\t\texclude " 6077 "dirs/files\n"); 6078 ERROR("-regex\t\t\tAllow POSIX regular expressions to " 6079 "be used in exclude\n\t\t\tdirs/files\n"); 6080 ERROR("\nFilesystem append options:\n"); 6081 ERROR("-noappend\t\tdo not append to existing " 6082 "filesystem\n"); 6083 ERROR("-root-becomes <name>\twhen appending source " 6084 "files/directories, make the\n"); 6085 ERROR("\t\t\toriginal root become a subdirectory in " 6086 "the new root\n"); 6087 ERROR("\t\t\tcalled <name>, rather than adding the new " 6088 "source items\n"); 6089 ERROR("\t\t\tto the original root\n"); 6090 ERROR("\nMksquashfs runtime options:\n"); 6091 ERROR("-version\t\tprint version, licence and " 6092 "copyright message\n"); 6093 ERROR("-exit-on-error\t\ttreat normally ignored errors " 6094 "as fatal\n"); 6095 ERROR("-recover <name>\t\trecover filesystem data " 6096 "using recovery file <name>\n"); 6097 ERROR("-no-recovery\t\tdon't generate a recovery " 6098 "file\n"); 6099 ERROR("-info\t\t\tprint files written to filesystem\n"); 6100 ERROR("-no-progress\t\tdon't display the progress " 6101 "bar\n"); 6102 ERROR("-progress\t\tdisplay progress bar when using " 6103 "the -info option\n"); 6104 ERROR("-processors <number>\tUse <number> processors." 6105 " By default will use number of\n"); 6106 ERROR("\t\t\tprocessors available\n"); 6107 ERROR("-mem <size>\t\tUse <size> physical memory. " 6108 "Currently set to %dM\n", total_mem); 6109 ERROR("\t\t\tOptionally a suffix of K, M or G can be" 6110 " given to specify\n\t\t\tKbytes, Mbytes or" 6111 " Gbytes respectively\n"); 6112 ERROR("\nMiscellaneous options:\n"); 6113 ERROR("-root-owned\t\talternative name for -all-root" 6114 "\n"); 6115 ERROR("-noInodeCompression\talternative name for -noI" 6116 "\n"); 6117 ERROR("-noDataCompression\talternative name for -noD" 6118 "\n"); 6119 ERROR("-noFragmentCompression\talternative name for " 6120 "-noF\n"); 6121 ERROR("-noXattrCompression\talternative name for " 6122 "-noX\n"); 6123 ERROR("\n-Xhelp\t\t\tprint compressor options for" 6124 " selected compressor\n"); 6125 ERROR("\nCompressors available and compressor specific " 6126 "options:\n"); 6127 display_compressor_usage(COMP_DEFAULT); 6128 exit(1); 6129 } 6130 } 6131 6132 /* ANDROID CHANGES START*/ 6133 #ifdef ANDROID 6134 if (fs_config_file) { 6135 if (load_canned_fs_config(fs_config_file) < 0) { 6136 fprintf(stderr, "failed to load %s\n", fs_config_file); 6137 exit(1); 6138 } 6139 fs_config_func = canned_fs_config; 6140 } else if (mount_point) { 6141 fs_config_func = fs_config; 6142 } 6143 if (whitelist_filename) 6144 process_whitelist_file(whitelist_filename); 6145 #endif 6146 /* ANDROID CHANGES END */ 6147 6148 /* 6149 * Some compressors may need the options to be checked for validity 6150 * once all the options have been processed 6151 */ 6152 res = compressor_options_post(comp, block_size); 6153 if(res) 6154 EXIT_MKSQUASHFS(); 6155 6156 /* 6157 * If the -info option has been selected then disable the 6158 * progress bar unless it has been explicitly enabled with 6159 * the -progress option 6160 */ 6161 if(!silent) 6162 progress = force_progress; 6163 6164 #ifdef SQUASHFS_TRACE 6165 /* 6166 * Disable progress bar if full debug tracing is enabled. 6167 * The progress bar in this case just gets in the way of the 6168 * debug trace output 6169 */ 6170 progress = FALSE; 6171 #endif 6172 6173 for(i = 0; i < source; i++) 6174 if(lstat(source_path[i], &source_buf) == -1) { 6175 fprintf(stderr, "Cannot stat source directory \"%s\" " 6176 "because %s\n", source_path[i], 6177 strerror(errno)); 6178 EXIT_MKSQUASHFS(); 6179 } 6180 6181 destination_file = argv[source + 1]; 6182 if(stat(argv[source + 1], &buf) == -1) { 6183 if(errno == ENOENT) { /* Does not exist */ 6184 fd = open(argv[source + 1], O_CREAT | O_TRUNC | O_RDWR, 6185 S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); 6186 if(fd == -1) { 6187 perror("Could not create destination file"); 6188 exit(1); 6189 } 6190 delete = TRUE; 6191 } else { 6192 perror("Could not stat destination file"); 6193 exit(1); 6194 } 6195 6196 } else { 6197 if(S_ISBLK(buf.st_mode)) { 6198 if((fd = open(argv[source + 1], O_RDWR)) == -1) { 6199 perror("Could not open block device as " 6200 "destination"); 6201 exit(1); 6202 } 6203 block_device = 1; 6204 6205 } else if(S_ISREG(buf.st_mode)) { 6206 fd = open(argv[source + 1], (delete ? O_TRUNC : 0) | 6207 O_RDWR); 6208 if(fd == -1) { 6209 perror("Could not open regular file for " 6210 "writing as destination"); 6211 exit(1); 6212 } 6213 } 6214 else { 6215 ERROR("Destination not block device or regular file\n"); 6216 exit(1); 6217 } 6218 6219 } 6220 6221 /* 6222 * process the exclude files - must be done afer destination file has 6223 * been possibly created 6224 */ 6225 for(i = source + 2; i < argc; i++) 6226 if(strcmp(argv[i], "-ef") == 0) 6227 /* 6228 * Note presence of filename arg has already 6229 * been checked 6230 */ 6231 process_exclude_file(argv[++i]); 6232 else if(strcmp(argv[i], "-e") == 0) 6233 break; 6234 else if(strcmp(argv[i], "-root-becomes") == 0 || 6235 strcmp(argv[i], "-sort") == 0 || 6236 strcmp(argv[i], "-pf") == 0 || 6237 strcmp(argv[i], "-af") == 0 || 6238 strcmp(argv[i], "-vaf") == 0 || 6239 strcmp(argv[i], "-comp") == 0) 6240 i++; 6241 6242 if(i != argc) { 6243 if(++i == argc) { 6244 ERROR("%s: -e missing arguments\n", argv[0]); 6245 EXIT_MKSQUASHFS(); 6246 } 6247 while(i < argc) 6248 if(old_exclude) 6249 old_add_exclude(argv[i++]); 6250 else 6251 add_exclude(argv[i++]); 6252 } 6253 6254 /* process the sort files - must be done afer the exclude files */ 6255 for(i = source + 2; i < argc; i++) 6256 if(strcmp(argv[i], "-sort") == 0) { 6257 int res = read_sort_file(argv[++i], source, 6258 source_path); 6259 if(res == FALSE) 6260 BAD_ERROR("Failed to read sort file\n"); 6261 sorted ++; 6262 } else if(strcmp(argv[i], "-e") == 0) 6263 break; 6264 else if(strcmp(argv[i], "-root-becomes") == 0 || 6265 strcmp(argv[i], "-ef") == 0 || 6266 strcmp(argv[i], "-pf") == 0 || 6267 strcmp(argv[i], "-af") == 0 || 6268 strcmp(argv[i], "-vaf") == 0 || 6269 strcmp(argv[i], "-comp") == 0) 6270 i++; 6271 6272 if(!delete) { 6273 comp = read_super(fd, &sBlk, argv[source + 1]); 6274 if(comp == NULL) { 6275 ERROR("Failed to read existing filesystem - will not " 6276 "overwrite - ABORTING!\n"); 6277 ERROR("To force Mksquashfs to write to this block " 6278 "device or file use -noappend\n"); 6279 EXIT_MKSQUASHFS(); 6280 } 6281 6282 block_log = slog(block_size = sBlk.block_size); 6283 noI = SQUASHFS_UNCOMPRESSED_INODES(sBlk.flags); 6284 noD = SQUASHFS_UNCOMPRESSED_DATA(sBlk.flags); 6285 noF = SQUASHFS_UNCOMPRESSED_FRAGMENTS(sBlk.flags); 6286 noX = SQUASHFS_UNCOMPRESSED_XATTRS(sBlk.flags); 6287 no_fragments = SQUASHFS_NO_FRAGMENTS(sBlk.flags); 6288 always_use_fragments = SQUASHFS_ALWAYS_FRAGMENTS(sBlk.flags); 6289 duplicate_checking = SQUASHFS_DUPLICATES(sBlk.flags); 6290 exportable = SQUASHFS_EXPORTABLE(sBlk.flags); 6291 no_xattrs = SQUASHFS_NO_XATTRS(sBlk.flags); 6292 comp_opts = SQUASHFS_COMP_OPTS(sBlk.flags); 6293 } 6294 6295 initialise_threads(readq, fragq, bwriteq, fwriteq, delete, 6296 destination_file); 6297 6298 res = compressor_init(comp, &stream, SQUASHFS_METADATA_SIZE, 0); 6299 if(res) 6300 BAD_ERROR("compressor_init failed\n"); 6301 6302 if(delete) { 6303 int size; 6304 void *comp_data = compressor_dump_options(comp, block_size, 6305 &size); 6306 6307 printf("Creating %d.%d filesystem on %s, block size %d.\n", 6308 SQUASHFS_MAJOR, SQUASHFS_MINOR, argv[source + 1], block_size); 6309 6310 /* 6311 * store any compressor specific options after the superblock, 6312 * and set the COMP_OPT flag to show that the filesystem has 6313 * compressor specfic options 6314 */ 6315 if(comp_data) { 6316 unsigned short c_byte = size | SQUASHFS_COMPRESSED_BIT; 6317 6318 SQUASHFS_INSWAP_SHORTS(&c_byte, 1); 6319 write_destination(fd, sizeof(struct squashfs_super_block), 6320 sizeof(c_byte), &c_byte); 6321 write_destination(fd, sizeof(struct squashfs_super_block) + 6322 sizeof(c_byte), size, comp_data); 6323 bytes = sizeof(struct squashfs_super_block) + sizeof(c_byte) 6324 + size; 6325 comp_opts = TRUE; 6326 } else 6327 bytes = sizeof(struct squashfs_super_block); 6328 } else { 6329 unsigned int last_directory_block, inode_dir_offset, 6330 inode_dir_file_size, root_inode_size, 6331 inode_dir_start_block, uncompressed_data, 6332 compressed_data, inode_dir_inode_number, 6333 inode_dir_parent_inode; 6334 unsigned int root_inode_start = 6335 SQUASHFS_INODE_BLK(sBlk.root_inode), 6336 root_inode_offset = 6337 SQUASHFS_INODE_OFFSET(sBlk.root_inode); 6338 6339 if((bytes = read_filesystem(root_name, fd, &sBlk, &inode_table, 6340 &data_cache, &directory_table, 6341 &directory_data_cache, &last_directory_block, 6342 &inode_dir_offset, &inode_dir_file_size, 6343 &root_inode_size, &inode_dir_start_block, 6344 &file_count, &sym_count, &dev_count, &dir_count, 6345 &fifo_count, &sock_count, &total_bytes, 6346 &total_inode_bytes, &total_directory_bytes, 6347 &inode_dir_inode_number, 6348 &inode_dir_parent_inode, add_old_root_entry, 6349 &fragment_table, &inode_lookup_table)) == 0) { 6350 ERROR("Failed to read existing filesystem - will not " 6351 "overwrite - ABORTING!\n"); 6352 ERROR("To force Mksquashfs to write to this block " 6353 "device or file use -noappend\n"); 6354 EXIT_MKSQUASHFS(); 6355 } 6356 if((append_fragments = fragments = sBlk.fragments)) { 6357 fragment_table = realloc((char *) fragment_table, 6358 ((fragments + FRAG_SIZE - 1) & ~(FRAG_SIZE - 1)) 6359 * sizeof(struct squashfs_fragment_entry)); 6360 if(fragment_table == NULL) 6361 BAD_ERROR("Out of memory in save filesystem state\n"); 6362 } 6363 6364 printf("Appending to existing %d.%d filesystem on %s, block " 6365 "size %d\n", SQUASHFS_MAJOR, SQUASHFS_MINOR, argv[source + 1], 6366 block_size); 6367 printf("All -b, -noI, -noD, -noF, -noX, no-duplicates, no-fragments, " 6368 "-always-use-fragments,\n-exportable and -comp options " 6369 "ignored\n"); 6370 printf("\nIf appending is not wanted, please re-run with " 6371 "-noappend specified!\n\n"); 6372 6373 compressed_data = (inode_dir_offset + inode_dir_file_size) & 6374 ~(SQUASHFS_METADATA_SIZE - 1); 6375 uncompressed_data = (inode_dir_offset + inode_dir_file_size) & 6376 (SQUASHFS_METADATA_SIZE - 1); 6377 6378 /* save original filesystem state for restoring ... */ 6379 sfragments = fragments; 6380 sbytes = bytes; 6381 sinode_count = sBlk.inodes; 6382 scache_bytes = root_inode_offset + root_inode_size; 6383 sdirectory_cache_bytes = uncompressed_data; 6384 sdata_cache = malloc(scache_bytes); 6385 if(sdata_cache == NULL) 6386 BAD_ERROR("Out of memory in save filesystem state\n"); 6387 sdirectory_data_cache = malloc(sdirectory_cache_bytes); 6388 if(sdirectory_data_cache == NULL) 6389 BAD_ERROR("Out of memory in save filesystem state\n"); 6390 memcpy(sdata_cache, data_cache, scache_bytes); 6391 memcpy(sdirectory_data_cache, directory_data_cache + 6392 compressed_data, sdirectory_cache_bytes); 6393 sinode_bytes = root_inode_start; 6394 stotal_bytes = total_bytes; 6395 stotal_inode_bytes = total_inode_bytes; 6396 stotal_directory_bytes = total_directory_bytes + 6397 compressed_data; 6398 sfile_count = file_count; 6399 ssym_count = sym_count; 6400 sdev_count = dev_count; 6401 sdir_count = dir_count + 1; 6402 sfifo_count = fifo_count; 6403 ssock_count = sock_count; 6404 sdup_files = dup_files; 6405 sid_count = id_count; 6406 write_recovery_data(&sBlk); 6407 save_xattrs(); 6408 appending = TRUE; 6409 6410 /* 6411 * set the filesystem state up to be able to append to the 6412 * original filesystem. The filesystem state differs depending 6413 * on whether we're appending to the original root directory, or 6414 * if the original root directory becomes a sub-directory 6415 * (root-becomes specified on command line, here root_name != 6416 * NULL) 6417 */ 6418 inode_bytes = inode_size = root_inode_start; 6419 directory_size = last_directory_block; 6420 cache_size = root_inode_offset + root_inode_size; 6421 directory_cache_size = inode_dir_offset + inode_dir_file_size; 6422 if(root_name) { 6423 sdirectory_bytes = last_directory_block; 6424 sdirectory_compressed_bytes = 0; 6425 root_inode_number = inode_dir_parent_inode; 6426 inode_no = sBlk.inodes + 2; 6427 directory_bytes = last_directory_block; 6428 directory_cache_bytes = uncompressed_data; 6429 memmove(directory_data_cache, directory_data_cache + 6430 compressed_data, uncompressed_data); 6431 cache_bytes = root_inode_offset + root_inode_size; 6432 add_old_root_entry(root_name, sBlk.root_inode, 6433 inode_dir_inode_number, SQUASHFS_DIR_TYPE); 6434 total_directory_bytes += compressed_data; 6435 dir_count ++; 6436 } else { 6437 sdirectory_compressed_bytes = last_directory_block - 6438 inode_dir_start_block; 6439 sdirectory_compressed = 6440 malloc(sdirectory_compressed_bytes); 6441 if(sdirectory_compressed == NULL) 6442 BAD_ERROR("Out of memory in save filesystem " 6443 "state\n"); 6444 memcpy(sdirectory_compressed, directory_table + 6445 inode_dir_start_block, 6446 sdirectory_compressed_bytes); 6447 sdirectory_bytes = inode_dir_start_block; 6448 root_inode_number = inode_dir_inode_number; 6449 inode_no = sBlk.inodes + 1; 6450 directory_bytes = inode_dir_start_block; 6451 directory_cache_bytes = inode_dir_offset; 6452 cache_bytes = root_inode_offset; 6453 } 6454 6455 inode_count = file_count + dir_count + sym_count + dev_count + 6456 fifo_count + sock_count; 6457 } 6458 6459 if(path) 6460 paths = add_subdir(paths, path); 6461 6462 dump_actions(); 6463 dump_pseudos(); 6464 6465 if(delete && !keep_as_directory && source == 1 && 6466 S_ISDIR(source_buf.st_mode)) 6467 dir_scan(&inode, source_path[0], scan1_readdir, progress); 6468 else if(!keep_as_directory && source == 1 && 6469 S_ISDIR(source_buf.st_mode)) 6470 dir_scan(&inode, source_path[0], scan1_single_readdir, progress); 6471 else 6472 dir_scan(&inode, "", scan1_encomp_readdir, progress); 6473 sBlk.root_inode = inode; 6474 sBlk.inodes = inode_count; 6475 sBlk.s_magic = SQUASHFS_MAGIC; 6476 sBlk.s_major = SQUASHFS_MAJOR; 6477 sBlk.s_minor = SQUASHFS_MINOR; 6478 sBlk.block_size = block_size; 6479 sBlk.block_log = block_log; 6480 sBlk.flags = SQUASHFS_MKFLAGS(noI, noD, noF, noX, no_fragments, 6481 always_use_fragments, duplicate_checking, exportable, 6482 no_xattrs, comp_opts); 6483 sBlk.mkfs_time = time(NULL); 6484 6485 disable_info(); 6486 6487 while((fragment = get_frag_action(fragment))) 6488 write_fragment(*fragment); 6489 unlock_fragments(); 6490 pthread_cleanup_push((void *) pthread_mutex_unlock, &fragment_mutex); 6491 pthread_mutex_lock(&fragment_mutex); 6492 while(fragments_outstanding) { 6493 pthread_mutex_unlock(&fragment_mutex); 6494 sched_yield(); 6495 pthread_mutex_lock(&fragment_mutex); 6496 } 6497 pthread_cleanup_pop(1); 6498 6499 queue_put(to_writer, NULL); 6500 if(queue_get(from_writer) != 0) 6501 EXIT_MKSQUASHFS(); 6502 6503 set_progressbar_state(FALSE); 6504 write_filesystem_tables(&sBlk, nopad); 6505 6506 /* ANDROID CHANGES START*/ 6507 #ifdef ANDROID 6508 if (block_map_file) 6509 fclose(block_map_file); 6510 #endif 6511 /* ANDROID CHANGES END */ 6512 6513 return 0; 6514 } 6515