/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.android.tools.build.apkzlib.zip;

import com.android.tools.build.apkzlib.zip.utils.CloseableByteSource;
import com.android.tools.build.apkzlib.zip.utils.CloseableDelegateByteSource;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.base.Verify;
import com.google.common.io.ByteSource;
import com.google.common.io.ByteStreams;
import com.google.common.primitives.Ints;
import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.util.Comparator;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;

/**
 * A stored entry represents a file in the zip. The entry may or may not be written to the zip
 * file.
 *
 * <p>Stored entries provide the operations that are related to the files themselves, not to the
 * zip. It is through the {@code StoredEntry} class that entries can be deleted
 * ({@link #delete()}), opened ({@link #open()}) or realigned ({@link #realign()}).
 *
 * <p>Entries are not created directly. They are created using
 * {@link ZFile#add(String, InputStream, boolean)} and obtained from the zip file
 * using {@link ZFile#get(String)} or {@link ZFile#entries()}.
 *
 * <p>Most of the data in an entry is in the Central Directory Header. This includes the name,
 * compression method, compressed and uncompressed file sizes, CRC32 checksum, etc. The CDH can
 * be obtained using the {@link #getCentralDirectoryHeader()} method.
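 *
 * <p>A minimal usage sketch (assumes a {@link ZFile} instance {@code zip} opened elsewhere; the
 * entry name is illustrative):
 * <pre>{@code
 * StoredEntry entry = zip.get("res/raw/data.bin");
 * if (entry != null && entry.getType() == StoredEntryType.FILE) {
 *     byte[] contents = entry.read();   // uncompressed contents
 *     entry.delete();                   // mark the entry for removal
 *     zip.update();                     // write pending changes to the zip file
 * }
 * }</pre>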
 */
public class StoredEntry {

    /**
     * Comparator that compares instances of {@link StoredEntry} by their names.
     */
    static final Comparator<StoredEntry> COMPARE_BY_NAME =
            (o1, o2) -> {
                if (o1 == null && o2 == null) {
                    return 0;
                }

                if (o1 == null) {
                    return -1;
                }

                if (o2 == null) {
                    return 1;
                }

                String name1 = o1.getCentralDirectoryHeader().getName();
                String name2 = o2.getCentralDirectoryHeader().getName();
                return name1.compareTo(name2);
            };

    /**
     * Signature of the data descriptor.
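     *
     * <p>Per the ZIP specification, a data descriptor record holds the CRC32, the compressed size
     * and the uncompressed size (4 bytes each) and may optionally start with this 4-byte
     * signature, making it 12 or 16 bytes long (see {@link #readDataDescriptorRecord()}).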
     */
    private static final int DATA_DESC_SIGNATURE = 0x08074b50;

    /**
     * Local header field: signature.
     */
    private static final ZipField.F4 F_LOCAL_SIGNATURE = new ZipField.F4(0, 0x04034b50,
            "Signature");

    /**
     * Local header field: version to extract, should match the CDH's.
     */
    @VisibleForTesting
    static final ZipField.F2 F_VERSION_EXTRACT = new ZipField.F2(
            F_LOCAL_SIGNATURE.endOffset(), "Version to extract",
            new ZipFieldInvariantNonNegative());

    /**
     * Local header field: GP bit flag, should match the CDH's.
     */
    private static final ZipField.F2 F_GP_BIT = new ZipField.F2(F_VERSION_EXTRACT.endOffset(),
            "GP bit flag");

    /**
     * Local header field: compression method, should match the CDH's.
     */
    private static final ZipField.F2 F_METHOD = new ZipField.F2(F_GP_BIT.endOffset(),
            "Compression method", new ZipFieldInvariantNonNegative());

    /**
     * Local header field: last modification time, should match the CDH's.
     */
    private static final ZipField.F2 F_LAST_MOD_TIME = new ZipField.F2(F_METHOD.endOffset(),
            "Last modification time");

    /**
     * Local header field: last modification date, should match the CDH's.
     */
    private static final ZipField.F2 F_LAST_MOD_DATE = new ZipField.F2(F_LAST_MOD_TIME.endOffset(),
            "Last modification date");

    /**
     * Local header field: CRC32 checksum, should match the CDH's. 0 if there is no data.
     */
    private static final ZipField.F4 F_CRC32 = new ZipField.F4(F_LAST_MOD_DATE.endOffset(),
            "CRC32");

    /**
     * Local header field: compressed size, size the data takes in the zip file.
     */
    private static final ZipField.F4 F_COMPRESSED_SIZE = new ZipField.F4(F_CRC32.endOffset(),
            "Compressed size", new ZipFieldInvariantNonNegative());

    /**
     * Local header field: uncompressed size, size the data takes after extraction.
     */
    private static final ZipField.F4 F_UNCOMPRESSED_SIZE = new ZipField.F4(
            F_COMPRESSED_SIZE.endOffset(), "Uncompressed size", new ZipFieldInvariantNonNegative());

    /**
     * Local header field: length of the file name.
     */
    private static final ZipField.F2 F_FILE_NAME_LENGTH = new ZipField.F2(
            F_UNCOMPRESSED_SIZE.endOffset(), "File name length",
            new ZipFieldInvariantNonNegative());

    /**
     * Local header field: length of the extra field.
     */
    private static final ZipField.F2 F_EXTRA_LENGTH = new ZipField.F2(
            F_FILE_NAME_LENGTH.endOffset(), "Extra length", new ZipFieldInvariantNonNegative());

    /**
     * Local header size (fixed part, not counting file name or extra field).
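     *
     * <p>Per the ZIP specification this fixed portion is 30 bytes: signature (4), version to
     * extract (2), GP bit flag (2), compression method (2), last modification time (2), last
     * modification date (2), CRC32 (4), compressed size (4), uncompressed size (4), file name
     * length (2) and extra field length (2).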
     */
    static final int FIXED_LOCAL_FILE_HEADER_SIZE = F_EXTRA_LENGTH.endOffset();

    /**
     * Type of entry.
     */
    @Nonnull
    private StoredEntryType type;

    /**
     * The central directory header with information about the file.
     */
    @Nonnull
    private CentralDirectoryHeader cdh;

    /**
     * The file this entry is associated with.
     */
    @Nonnull
    private ZFile file;

    /**
     * Has this entry been deleted?
     */
    private boolean deleted;

    /**
     * Extra field specified in the local directory.
     */
    @Nonnull
    private ExtraField localExtra;

    /**
     * Type of data descriptor associated with the entry.
     */
    @Nonnull
    private DataDescriptorType dataDescriptorType;

    /**
     * Source for this entry's data. If this entry is a directory, this source has to have zero
     * size.
     */
    @Nonnull
    private ProcessedAndRawByteSources source;

    /**
     * Verify log for the entry.
     */
    @Nonnull
    private final VerifyLog verifyLog;

    /**
     * Creates a new stored entry.
     *
     * @param header the header with the entry information; if the header does not contain an
     * offset it means that this entry has not yet been written to the zip file
     * @param file the zip file containing the entry
     * @param source the entry's data source; it can be {@code null} only if the source can be
     * read from the zip file, that is, if {@code header.getOffset()} is non-negative
     * @throws IOException failed to create the entry
     */
    StoredEntry(
            @Nonnull CentralDirectoryHeader header,
            @Nonnull ZFile file,
            @Nullable ProcessedAndRawByteSources source)
            throws IOException {
        cdh = header;
        this.file = file;
        deleted = false;
        verifyLog = file.makeVerifyLog();

        if (header.getOffset() >= 0) {
            /*
             * This will be overwritten during readLocalHeader. However, IJ complains if we don't
             * assign a value to localExtra because of the @Nonnull annotation.
             */
            localExtra = new ExtraField();

            readLocalHeader();

            Preconditions.checkArgument(
                    source == null,
                    "Source was defined but contents already exist on file.");

            /*
             * Since the file is already in the zip, dynamically create a source that will read
             * the file from the zip when needed. The assignment is not really needed, but we
             * would get a warning because of the @Nonnull annotation otherwise.
             */
            this.source = createSourceFromZip(cdh.getOffset());
        } else {
            /*
             * There is no local extra data for new files.
             */
            localExtra = new ExtraField();

            Preconditions.checkNotNull(
                    source,
                    "Source was not defined, but contents are not on file.");
            this.source = source;
        }

        /*
         * It seems that zip utilities store directories as names ending with "/".
         * This seems to be respected by all zip utilities although I could not find it anywhere
         * in the specification.
         */
        if (cdh.getName().endsWith(Character.toString(ZFile.SEPARATOR))) {
            type = StoredEntryType.DIRECTORY;
            verifyLog.verify(
                    this.source.getProcessedByteSource().isEmpty(),
                    "Directory source is not empty.");
            verifyLog.verify(cdh.getCrc32() == 0, "Directory has CRC32 = %s.", cdh.getCrc32());
            verifyLog.verify(
                    cdh.getUncompressedSize() == 0,
                    "Directory has uncompressed size = %s.",
                    cdh.getUncompressedSize());

            /*
             * Some clever (OMG!) tools, like jar, will actually try to compress the directory
             * contents and generate 2 bytes of compressed data. Of course, the uncompressed size
             * is zero and we're just wasting space.
             */
            long compressedSize = cdh.getCompressionInfoWithWait().getCompressedSize();
            verifyLog.verify(
                    compressedSize == 0 || compressedSize == 2,
                    "Directory has compressed size = %s.", compressedSize);
        } else {
            type = StoredEntryType.FILE;
        }

        /*
         * By default we assume there is no data descriptor unless the CRC is marked as deferred
         * in the header's GP Bit.
         */
        dataDescriptorType = DataDescriptorType.NO_DATA_DESCRIPTOR;
        if (header.getGpBit().isDeferredCrc()) {
            /*
             * If the deferred CRC bit exists, then we have an extra descriptor field. This extra
             * field may have a signature.
             */
            Verify.verify(header.getOffset() >= 0, "Files that are not on disk cannot have the "
                    + "deferred CRC bit set.");

            try {
                readDataDescriptorRecord();
            } catch (IOException e) {
                throw new IOException("Failed to read data descriptor record.", e);
            }
        }
    }

    /**
     * Obtains the size of the local header of this entry.
     *
     * @return the local header size in bytes
     */
    public int getLocalHeaderSize() {
        Preconditions.checkState(!deleted, "deleted");
        return FIXED_LOCAL_FILE_HEADER_SIZE + cdh.getEncodedFileName().length + localExtra.size();
    }

    /**
     * Obtains the size of the whole entry on disk, including local header and data descriptor.
     * This method will wait until compression information is complete, if needed.
     *
     * @return the number of bytes
     * @throws IOException failed to get compression information
     */
    long getInFileSize() throws IOException {
        Preconditions.checkState(!deleted, "deleted");
        return cdh.getCompressionInfoWithWait().getCompressedSize() + getLocalHeaderSize()
                + dataDescriptorType.size;
    }

    /**
     * Obtains a stream that allows reading from the entry.
     *
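     * <p>A minimal usage sketch (try-with-resources ensures the stream is closed; {@code entry}
     * is illustrative):
     * <pre>{@code
     * try (InputStream is = entry.open()) {
     *     // consume the uncompressed data
     * }
     * }</pre>
     *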
     * @return a stream that will return as many bytes as the uncompressed entry size
     * @throws IOException failed to open the stream
     */
    @Nonnull
    public InputStream open() throws IOException {
        return source.getProcessedByteSource().openStream();
    }

    /**
     * Obtains the contents of the file.
     *
     * @return a byte array with the contents of the file (uncompressed if the file was compressed)
     * @throws IOException failed to read the file
     */
    @Nonnull
    public byte[] read() throws IOException {
        try (InputStream is = open()) {
            return ByteStreams.toByteArray(is);
        }
    }

    /**
     * Obtains the contents of the file in an existing buffer.
     *
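     * <p>A minimal usage sketch ({@code entry} is an existing, non-directory entry; the buffer is
     * sized from the central directory data):
     * <pre>{@code
     * // assumes the entry fits in an int-sized buffer
     * byte[] buffer = new byte[(int) entry.getCentralDirectoryHeader().getUncompressedSize()];
     * int read = entry.read(buffer);
     * }</pre>
     *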
     * @param bytes buffer to read the file contents into
     * @return the number of bytes read
     * @throws IOException failed to read the file
     */
    public int read(byte[] bytes) throws IOException {
        if (bytes.length < getCentralDirectoryHeader().getUncompressedSize()) {
            throw new RuntimeException(
                    "Buffer too small while reading " + getCentralDirectoryHeader().getName());
        }
        try (InputStream is = new BufferedInputStream(open())) {
            return ByteStreams.read(is, bytes, 0, bytes.length);
        }
    }

    /**
     * Obtains the type of entry.
     *
     * @return the type of entry
     */
    @Nonnull
    public StoredEntryType getType() {
        Preconditions.checkState(!deleted, "deleted");
        return type;
    }

    /**
     * Deletes this entry from the zip file. Invoking this method doesn't update the zip itself.
     * To eventually write updates to disk, {@link ZFile#update()} must be called.
     *
     * @throws IOException failed to delete the entry
     * @throws IllegalStateException if the zip file was open in read-only mode
     */
    public void delete() throws IOException {
        delete(true);
    }

    /**
     * Deletes this entry from the zip file. Invoking this method doesn't update the zip itself.
     * To eventually write updates to disk, {@link ZFile#update()} must be called.
     *
     * @param notify should listeners be notified of the deletion? This will only be
     * {@code false} if the entry is being removed as part of a replacement
     * @throws IOException failed to delete the entry
     * @throws IllegalStateException if the zip file was open in read-only mode
     */
    void delete(boolean notify) throws IOException {
        Preconditions.checkState(!deleted, "deleted");
        file.delete(this, notify);
        deleted = true;
        source.close();
    }

    /**
     * Returns {@code true} if this entry has been deleted/replaced.
     */
    public boolean isDeleted() {
        return deleted;
    }

    /**
     * Obtains the CDH associated with this entry.
     *
     * @return the CDH
     */
    @Nonnull
    public CentralDirectoryHeader getCentralDirectoryHeader() {
        return cdh;
    }

    /**
     * Reads the file's local header and verifies that it matches the Central Directory
     * Header provided in the constructor. This method should only be called if the entry already
     * exists on disk; new entries do not have local headers.
     * <p>
     * This method will define the {@link #localExtra} field, which is only defined in the
     * local header.
     *
     * @throws IOException failed to read the local header
     */
    private void readLocalHeader() throws IOException {
        byte[] localHeader = new byte[FIXED_LOCAL_FILE_HEADER_SIZE];
        file.directFullyRead(cdh.getOffset(), localHeader);

        CentralDirectoryHeaderCompressInfo compressInfo = cdh.getCompressionInfoWithWait();

        ByteBuffer bytes = ByteBuffer.wrap(localHeader);
        F_LOCAL_SIGNATURE.verify(bytes);
        F_VERSION_EXTRACT.verify(bytes, compressInfo.getVersionExtract(), verifyLog);
        F_GP_BIT.verify(bytes, cdh.getGpBit().getValue(), verifyLog);
        F_METHOD.verify(bytes, compressInfo.getMethod().methodCode, verifyLog);

        if (file.areTimestampsIgnored()) {
            F_LAST_MOD_TIME.skip(bytes);
            F_LAST_MOD_DATE.skip(bytes);
        } else {
            F_LAST_MOD_TIME.verify(bytes, cdh.getLastModTime(), verifyLog);
            F_LAST_MOD_DATE.verify(bytes, cdh.getLastModDate(), verifyLog);
        }

        /*
         * If CRC-32, compressed size and uncompressed size are deferred, their values in Local
         * File Header must be ignored and their actual values must be read from the Data
         * Descriptor following the contents of this entry. See readDataDescriptorRecord().
         */
        if (cdh.getGpBit().isDeferredCrc()) {
            F_CRC32.skip(bytes);
            F_COMPRESSED_SIZE.skip(bytes);
            F_UNCOMPRESSED_SIZE.skip(bytes);
        } else {
            F_CRC32.verify(bytes, cdh.getCrc32(), verifyLog);
            F_COMPRESSED_SIZE.verify(bytes, compressInfo.getCompressedSize(), verifyLog);
            F_UNCOMPRESSED_SIZE.verify(bytes, cdh.getUncompressedSize(), verifyLog);
        }

        F_FILE_NAME_LENGTH.verify(bytes, cdh.getEncodedFileName().length);
        long extraLength = F_EXTRA_LENGTH.read(bytes);
        long fileNameStart = cdh.getOffset() + F_EXTRA_LENGTH.endOffset();
        byte[] fileNameData = new byte[cdh.getEncodedFileName().length];
        file.directFullyRead(fileNameStart, fileNameData);

        String fileName = EncodeUtils.decode(fileNameData, cdh.getGpBit());
        if (!fileName.equals(cdh.getName())) {
            verifyLog.log(
                    String.format(
                            "Central directory reports file as being named '%s' but local header"
                                    + " reports file being named '%s'.",
                    cdh.getName(),
                    fileName));
        }

        long localExtraStart = fileNameStart + cdh.getEncodedFileName().length;
        byte[] localExtraRaw = new byte[Ints.checkedCast(extraLength)];
        file.directFullyRead(localExtraStart, localExtraRaw);
        localExtra = new ExtraField(localExtraRaw);
    }

    /**
     * Reads the data descriptor record. This method can only be invoked once it is established
     * that a data descriptor does exist. It will read the data descriptor and check that the data
     * described there matches the data provided in the Central Directory.
     * <p>
     * This method will set the {@link #dataDescriptorType} field to the appropriate type of
     * data descriptor record.
     *
     * @throws IOException failed to read the data descriptor record
     */
    private void readDataDescriptorRecord() throws IOException {
        CentralDirectoryHeaderCompressInfo compressInfo = cdh.getCompressionInfoWithWait();

        long ddStart = cdh.getOffset() + FIXED_LOCAL_FILE_HEADER_SIZE
                + cdh.getName().length() + localExtra.size() + compressInfo.getCompressedSize();
        byte[] ddData = new byte[DataDescriptorType.DATA_DESCRIPTOR_WITH_SIGNATURE.size];
        file.directFullyRead(ddStart, ddData);

        ByteBuffer ddBytes = ByteBuffer.wrap(ddData);

        ZipField.F4 signatureField = new ZipField.F4(0, "Data descriptor signature");
        int cpos = ddBytes.position();
        long sig = signatureField.read(ddBytes);
        if (sig == DATA_DESC_SIGNATURE) {
            dataDescriptorType = DataDescriptorType.DATA_DESCRIPTOR_WITH_SIGNATURE;
        } else {
            dataDescriptorType = DataDescriptorType.DATA_DESCRIPTOR_WITHOUT_SIGNATURE;
            ddBytes.position(cpos);
        }

        ZipField.F4 crc32Field = new ZipField.F4(0, "CRC32");
        ZipField.F4 compressedField = new ZipField.F4(crc32Field.endOffset(), "Compressed size");
        ZipField.F4 uncompressedField = new ZipField.F4(compressedField.endOffset(),
                "Uncompressed size");

        crc32Field.verify(ddBytes, cdh.getCrc32(), verifyLog);
        compressedField.verify(ddBytes, compressInfo.getCompressedSize(), verifyLog);
        uncompressedField.verify(ddBytes, cdh.getUncompressedSize(), verifyLog);
    }

    /**
     * Creates a new source that reads data from the zip.
     *
     * @param zipOffset the offset into the zip file where the data is, must be non-negative
     * @throws IOException failed to close the old source
     * @return the created source
     */
    @Nonnull
    private ProcessedAndRawByteSources createSourceFromZip(final long zipOffset)
            throws IOException {
        Preconditions.checkArgument(zipOffset >= 0, "zipOffset < 0");

        final CentralDirectoryHeaderCompressInfo compressInfo;
        try {
            compressInfo = cdh.getCompressionInfoWithWait();
        } catch (IOException e) {
            throw new RuntimeException("IOException should never occur here because compression "
                    + "information should be immediately available if reading from zip.", e);
        }

        /*
         * Create a source that will return whatever is on the zip file.
         */
        CloseableByteSource rawContents = new CloseableByteSource() {
            @Override
            public long size() throws IOException {
                return compressInfo.getCompressedSize();
            }

            @Nonnull
            @Override
            public InputStream openStream() throws IOException {
                Preconditions.checkState(!deleted, "deleted");

                long dataStart = zipOffset + getLocalHeaderSize();
                long dataEnd = dataStart + compressInfo.getCompressedSize();

                file.openReadOnly();
                return file.directOpen(dataStart, dataEnd);
            }

            @Override
            protected void innerClose() throws IOException {
                /*
                 * Nothing to do here.
                 */
            }
        };

        return createSourcesFromRawContents(rawContents);
    }

    /**
     * Creates a {@link ProcessedAndRawByteSources} from the raw data source. The processed source
     * will either inflate or do nothing depending on the compression information that, at this
     * point, should already be available.
     *
     * @param rawContents the raw data to create the source from
     * @return the sources for this entry
     */
    @Nonnull
    private ProcessedAndRawByteSources createSourcesFromRawContents(
            @Nonnull CloseableByteSource rawContents) {
        CentralDirectoryHeaderCompressInfo compressInfo;
        try {
            compressInfo = cdh.getCompressionInfoWithWait();
        } catch (IOException e) {
            throw new RuntimeException("IOException should never occur here because compression "
                    + "information should be immediately available if creating from raw "
                    + "contents.", e);
        }

        CloseableByteSource contents;

        /*
         * If the contents are deflated, wrap that source in an inflater source so we get the
         * uncompressed data.
         */
        if (compressInfo.getMethod() == CompressionMethod.DEFLATE) {
            contents = new InflaterByteSource(rawContents);
        } else {
            contents = rawContents;
        }

        return new ProcessedAndRawByteSources(contents, rawContents);
    }

    /**
     * Replaces {@link #source} with one that reads file data from the zip file.
     *
     * @param zipFileOffset the offset in the zip file where data is written; must be non-negative
     * @throws IOException failed to replace the source
     */
    void replaceSourceFromZip(long zipFileOffset) throws IOException {
        Preconditions.checkArgument(zipFileOffset >= 0, "zipFileOffset < 0");

        ProcessedAndRawByteSources oldSource = source;
        source = createSourceFromZip(zipFileOffset);
        cdh.setOffset(zipFileOffset);
        oldSource.close();
    }

    /**
     * Loads all data in memory and replaces {@link #source} with one that contains all the data
     * in memory.
     *
     * <p>If the entry's contents are already in memory, this call does nothing.
     *
     * @throws IOException failed to replace the source
     */
    void loadSourceIntoMemory() throws IOException {
        if (cdh.getOffset() == -1) {
            /*
             * No offset in the CDR means data has not been written to disk which, in turn,
             * means data is already loaded into memory.
             */
            return;
        }

        ProcessedAndRawByteSources oldSource = source;
        byte[] rawContents = oldSource.getRawByteSource().read();
        source = createSourcesFromRawContents(new CloseableDelegateByteSource(
                ByteSource.wrap(rawContents), rawContents.length));
        cdh.setOffset(-1);
        oldSource.close();
    }

    /**
     * Obtains the source data for this entry. This method can only be called for files; it
     * cannot be called for directories.
     *
     * @return the entry source
     */
    @Nonnull
    ProcessedAndRawByteSources getSource() {
        return source;
    }

    /**
     * Obtains the type of data descriptor used in the entry.
     *
     * @return the type of data descriptor
     */
    @Nonnull
    public DataDescriptorType getDataDescriptorType() {
        return dataDescriptorType;
    }

    /**
     * Removes the data descriptor, if there is one, and resets the deferred CRC bit in the
     * central directory header.
     *
     * @return was the data descriptor removed?
     */
    boolean removeDataDescriptor() {
        if (dataDescriptorType == DataDescriptorType.NO_DATA_DESCRIPTOR) {
            return false;
        }

        dataDescriptorType = DataDescriptorType.NO_DATA_DESCRIPTOR;
        cdh.resetDeferredCrc();
        return true;
    }

    /**
     * Obtains the local header data.
     *
     * @return the header data
     * @throws IOException failed to get header byte data
     */
    @Nonnull
    byte[] toHeaderData() throws IOException {

        byte[] encodedFileName = cdh.getEncodedFileName();

        ByteBuffer out =
                ByteBuffer.allocate(
                        F_EXTRA_LENGTH.endOffset() + encodedFileName.length + localExtra.size());

        CentralDirectoryHeaderCompressInfo compressInfo = cdh.getCompressionInfoWithWait();

        F_LOCAL_SIGNATURE.write(out);
        F_VERSION_EXTRACT.write(out, compressInfo.getVersionExtract());
        F_GP_BIT.write(out, cdh.getGpBit().getValue());
        F_METHOD.write(out, compressInfo.getMethod().methodCode);

        if (file.areTimestampsIgnored()) {
            F_LAST_MOD_TIME.write(out, 0);
            F_LAST_MOD_DATE.write(out, 0);
        } else {
            F_LAST_MOD_TIME.write(out, cdh.getLastModTime());
            F_LAST_MOD_DATE.write(out, cdh.getLastModDate());
        }

        F_CRC32.write(out, cdh.getCrc32());
        F_COMPRESSED_SIZE.write(out, compressInfo.getCompressedSize());
        F_UNCOMPRESSED_SIZE.write(out, cdh.getUncompressedSize());
        F_FILE_NAME_LENGTH.write(out, cdh.getEncodedFileName().length);
        F_EXTRA_LENGTH.write(out, localExtra.size());

        out.put(cdh.getEncodedFileName());
        localExtra.write(out);

        return out.array();
    }

    /**
     * Requests that this entry be realigned. If this entry is already aligned according to the
     * rules in {@link ZFile} then this method does nothing. Otherwise it will move the file's data
     * into memory and place it in a different area of the zip.
     *
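     * <p>A sketch of the re-fetch pattern implied by the return value ({@code zFile} and
     * {@code name} are illustrative):
     * <pre>{@code
     * if (entry.realign()) {
     *     entry = zFile.get(name);   // the old object may have been marked as deleted
     * }
     * }</pre>
     *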
     * @return has this file been changed? Note that if the entry has not yet been written to the
     * file, realignment does not count as a change as nothing needs to be updated in the file;
     * also, if the entry has been changed, this object may have been marked as deleted and a new
     * stored entry may need to be fetched from the file
     * @throws IOException failed to realign the entry; the entry may no longer exist in the zip
     * file
     */
    public boolean realign() throws IOException {
        Preconditions.checkState(!deleted, "Entry has been deleted.");

        return file.realign(this);
    }

    /**
     * Obtains the contents of the local extra field.
     *
     * @return the contents of the local extra field
     */
    @Nonnull
    public ExtraField getLocalExtra() {
        return localExtra;
    }

    /**
     * Sets the contents of the local extra field.
     *
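     * <p>A minimal usage sketch ({@code rawExtraBytes} and {@code zFile} are illustrative; the
     * change only reaches disk once {@link ZFile#update()} is called):
     * <pre>{@code
     * entry.setLocalExtra(new ExtraField(rawExtraBytes));
     * zFile.update();
     * }</pre>
     *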
     * @param localExtra the contents of the local extra field
     * @throws IOException failed to update the zip file
     */
    public void setLocalExtra(@Nonnull ExtraField localExtra) throws IOException {
        boolean resized = setLocalExtraNoNotify(localExtra);
        file.localHeaderChanged(this, resized);
    }

    /**
     * Sets the contents of the local extra field, but does not notify the {@link ZFile} of the
     * change. This is used internally when the {@link ZFile} itself wants to change the local
     * extra and doesn't need the callback.
     *
     * @param localExtra the contents of the local extra field
     * @return has the local header size changed?
     * @throws IOException failed to load the file
     */
    boolean setLocalExtraNoNotify(@Nonnull ExtraField localExtra) throws IOException {
        /*
         * Make sure we load the data into memory.
         *
         * If we change the size of the local header, the actual start of the file changes
         * according to our in-memory structures so, if we don't read the file now, we won't be
         * able to load it later :)
         *
         * But, even if the size doesn't change, we need to read it to force the entry to be
         * rewritten; otherwise the changes in the local header aren't written. Of course this
         * case may be optimized with some extra complexity added :)
         */
        loadSourceIntoMemory();

        boolean sizeChanged = this.localExtra.size() != localExtra.size();
        this.localExtra = localExtra;
        return sizeChanged;
    }

    /**
     * Obtains the verify log for the entry.
     *
     * @return the verify log
     */
    @Nonnull
    public VerifyLog getVerifyLog() {
        return verifyLog;
    }
}