001/* 002 * Licensed to the Apache Software Foundation (ASF) under one 003 * or more contributor license agreements. See the NOTICE file 004 * distributed with this work for additional information 005 * regarding copyright ownership. The ASF licenses this file 006 * to you under the Apache License, Version 2.0 (the 007 * "License"); you may not use this file except in compliance 008 * with the License. You may obtain a copy of the License at 009 * 010 * http://www.apache.org/licenses/LICENSE-2.0 011 * 012 * Unless required by applicable law or agreed to in writing, 013 * software distributed under the License is distributed on an 014 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 015 * KIND, either express or implied. See the License for the 016 * specific language governing permissions and limitations 017 * under the License. 018 */ 019package org.apache.commons.compress.archivers.zip; 020 021import static org.apache.commons.compress.archivers.zip.ZipConstants.DWORD; 022import static org.apache.commons.compress.archivers.zip.ZipConstants.SHORT; 023import static org.apache.commons.compress.archivers.zip.ZipConstants.WORD; 024import static org.apache.commons.compress.archivers.zip.ZipConstants.ZIP64_MAGIC; 025 026import java.io.ByteArrayInputStream; 027import java.io.ByteArrayOutputStream; 028import java.io.EOFException; 029import java.io.IOException; 030import java.io.InputStream; 031import java.io.PushbackInputStream; 032import java.math.BigInteger; 033import java.nio.ByteBuffer; 034import java.util.Arrays; 035import java.util.Objects; 036import java.util.zip.CRC32; 037import java.util.zip.DataFormatException; 038import java.util.zip.Inflater; 039import java.util.zip.ZipEntry; 040import java.util.zip.ZipException; 041 042import org.apache.commons.compress.archivers.ArchiveEntry; 043import org.apache.commons.compress.archivers.ArchiveInputStream; 044import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream; 045import 
org.apache.commons.compress.compressors.deflate64.Deflate64CompressorInputStream; 046import org.apache.commons.compress.utils.ArchiveUtils; 047import org.apache.commons.compress.utils.IOUtils; 048import org.apache.commons.compress.utils.InputStreamStatistics; 049 050/** 051 * Implements an input stream that can read Zip archives. 052 * 053 * <p>As of Apache Commons Compress it transparently supports Zip64 054 * extensions and thus individual entries and archives larger than 4 055 * GB or with more than 65536 entries.</p> 056 * 057 * <p>The {@link ZipFile} class is preferred when reading from files 058 * as {@link ZipArchiveInputStream} is limited by not being able to 059 * read the central directory header before returning entries. In 060 * particular {@link ZipArchiveInputStream}</p> 061 * 062 * <ul> 063 * 064 * <li>may return entries that are not part of the central directory 065 * at all and shouldn't be considered part of the archive.</li> 066 * 067 * <li>may return several entries with the same name.</li> 068 * 069 * <li>will not return internal or external attributes.</li> 070 * 071 * <li>may return incomplete extra field data.</li> 072 * 073 * <li>may return unknown sizes and CRC values for entries until the 074 * next entry has been reached if the archive uses the data 075 * descriptor feature.</li> 076 * 077 * </ul> 078 * 079 * @see ZipFile 080 * @NotThreadSafe 081 */ 082public class ZipArchiveInputStream extends ArchiveInputStream implements InputStreamStatistics { 083 084 /** 085 * Bounded input stream adapted from commons-io 086 */ 087 private class BoundedInputStream extends InputStream { 088 089 /** the wrapped input stream */ 090 private final InputStream in; 091 092 /** the max length to provide */ 093 private final long max; 094 095 /** the number of bytes already returned */ 096 private long pos; 097 098 /** 099 * Creates a new {@code BoundedInputStream} that wraps the given input 100 * stream and limits it to a certain size. 
101 * 102 * @param in The wrapped input stream 103 * @param size The maximum number of bytes to return 104 */ 105 public BoundedInputStream(final InputStream in, final long size) { 106 this.max = size; 107 this.in = in; 108 } 109 110 @Override 111 public int available() throws IOException { 112 if (max >= 0 && pos >= max) { 113 return 0; 114 } 115 return in.available(); 116 } 117 118 @Override 119 public int read() throws IOException { 120 if (max >= 0 && pos >= max) { 121 return -1; 122 } 123 final int result = in.read(); 124 pos++; 125 count(1); 126 current.bytesReadFromStream++; 127 return result; 128 } 129 130 @Override 131 public int read(final byte[] b) throws IOException { 132 return this.read(b, 0, b.length); 133 } 134 135 @Override 136 public int read(final byte[] b, final int off, final int len) throws IOException { 137 if (len == 0) { 138 return 0; 139 } 140 if (max >= 0 && pos >= max) { 141 return -1; 142 } 143 final long maxRead = max >= 0 ? Math.min(len, max - pos) : len; 144 final int bytesRead = in.read(b, off, (int) maxRead); 145 146 if (bytesRead == -1) { 147 return -1; 148 } 149 150 pos += bytesRead; 151 count(bytesRead); 152 current.bytesReadFromStream += bytesRead; 153 return bytesRead; 154 } 155 156 @Override 157 public long skip(final long n) throws IOException { 158 final long toSkip = max >= 0 ? Math.min(n, max - pos) : n; 159 final long skippedBytes = IOUtils.skip(in, toSkip); 160 pos += skippedBytes; 161 return skippedBytes; 162 } 163 } 164 165 /** 166 * Structure collecting information for the entry that is 167 * currently being read. 168 */ 169 private static final class CurrentEntry { 170 171 /** 172 * Current ZIP entry. 173 */ 174 private final ZipArchiveEntry entry = new ZipArchiveEntry(); 175 176 /** 177 * Does the entry use a data descriptor? 178 */ 179 private boolean hasDataDescriptor; 180 181 /** 182 * Does the entry have a ZIP64 extended information extra field. 
183 */ 184 private boolean usesZip64; 185 186 /** 187 * Number of bytes of entry content read by the client if the 188 * entry is STORED. 189 */ 190 private long bytesRead; 191 192 /** 193 * Number of bytes of entry content read from the stream. 194 * 195 * <p>This may be more than the actual entry's length as some 196 * stuff gets buffered up and needs to be pushed back when the 197 * end of the entry has been reached.</p> 198 */ 199 private long bytesReadFromStream; 200 201 /** 202 * The checksum calculated as the current entry is read. 203 */ 204 private final CRC32 crc = new CRC32(); 205 206 /** 207 * The input stream decompressing the data for shrunk and imploded entries. 208 */ 209 private InputStream inputStream; 210 211 @SuppressWarnings("unchecked") // Caller beware 212 private <T extends InputStream> T checkInputStream() { 213 return (T) Objects.requireNonNull(inputStream, "inputStream"); 214 } 215 } 216 217 private static final int LFH_LEN = 30; 218 /* 219 local file header signature WORD 220 version needed to extract SHORT 221 general purpose bit flag SHORT 222 compression method SHORT 223 last mod file time SHORT 224 last mod file date SHORT 225 crc-32 WORD 226 compressed size WORD 227 uncompressed size WORD 228 file name length SHORT 229 extra field length SHORT 230 */ 231 232 private static final int CFH_LEN = 46; 233 /* 234 central file header signature WORD 235 version made by SHORT 236 version needed to extract SHORT 237 general purpose bit flag SHORT 238 compression method SHORT 239 last mod file time SHORT 240 last mod file date SHORT 241 crc-32 WORD 242 compressed size WORD 243 uncompressed size WORD 244 file name length SHORT 245 extra field length SHORT 246 file comment length SHORT 247 disk number start SHORT 248 internal file attributes SHORT 249 external file attributes WORD 250 relative offset of local header WORD 251 */ 252 253 private static final long TWO_EXP_32 = ZIP64_MAGIC + 1; 254 255 private static final String 
            USE_ZIPFILE_INSTEAD_OF_STREAM_DISCLAIMER =
        " while reading a stored entry using data descriptor. Either the archive is broken"
        + " or it can not be read using ZipArchiveInputStream and you must use ZipFile."
        + " A common cause for this is a ZIP archive containing a ZIP archive."
        + " See http://commons.apache.org/proper/commons-compress/zip.html#ZipArchiveInputStream_vs_ZipFile";

    /** First bytes of a "local file header" signature. */
    private static final byte[] LFH = ZipLong.LFH_SIG.getBytes();

    /** First bytes of a "central file header" signature. */
    private static final byte[] CFH = ZipLong.CFH_SIG.getBytes();

    /** First bytes of a "data descriptor" signature. */
    private static final byte[] DD = ZipLong.DD_SIG.getBytes();

    /** Magic string that terminates an APK Signing Block, see isApkSigningBlock. */
    private static final byte[] APK_SIGNING_BLOCK_MAGIC = {
        'A', 'P', 'K', ' ', 'S', 'i', 'g', ' ', 'B', 'l', 'o', 'c', 'k', ' ', '4', '2',
    };

    private static final BigInteger LONG_MAX = BigInteger.valueOf(Long.MAX_VALUE);

    // Compares the first expected.length bytes of signature with expected;
    // callers guarantee signature is at least that long.
    private static boolean checksig(final byte[] signature, final byte[] expected) {
        for (int i = 0; i < expected.length; i++) {
            if (signature[i] != expected[i]) {
                return false;
            }
        }
        return true;
    }

    /**
     * Checks if the signature matches what is expected for a ZIP file.
     * Does not currently handle self-extracting ZIPs which may have arbitrary
     * leading content.
     *
     * @param signature the bytes to check
     * @param length    the number of bytes to check
     * @return true, if this stream is a ZIP archive stream, false otherwise
     */
    public static boolean matches(final byte[] signature, final int length) {
        if (length < ZipArchiveOutputStream.LFH_SIG.length) {
            return false;
        }

        return checksig(signature, ZipArchiveOutputStream.LFH_SIG) // normal file
            || checksig(signature, ZipArchiveOutputStream.EOCD_SIG) // empty zip
            || checksig(signature, ZipArchiveOutputStream.DD_SIG) // split zip
            || checksig(signature, ZipLong.SINGLE_SEGMENT_SPLIT_MARKER.getBytes());
    }

    /** The ZIP encoding to use for file names and the file comment. */
    private final ZipEncoding zipEncoding;

    // the provided encoding (for unit tests)
    final String encoding;

    /** Whether to look for and use Unicode extra fields. */
    private final boolean useUnicodeExtraFields;

    /** Wrapped stream, will always be a PushbackInputStream. */
    private final InputStream inputStream;

    /** Inflater used for all deflated entries. */
    private final Inflater inf = new Inflater(true);

    /** Buffer used to read from the wrapped stream. */
    private final ByteBuffer buf = ByteBuffer.allocate(ZipArchiveOutputStream.BUFFER_SIZE);

    /** The entry that is currently being read. */
    private CurrentEntry current;

    /** Whether the stream has been closed. */
    private boolean closed;

    /** Whether the stream has reached the central directory - and thus found all entries. */
    private boolean hitCentralDirectory;

    /**
     * When reading a stored entry that uses the data descriptor this
     * stream has to read the full entry and caches it.  This is the
     * cache.
     */
    private ByteArrayInputStream lastStoredEntry;

    /**
     * Whether the stream will try to read STORED entries that use a data descriptor.
     * Setting it to true means we will not stop reading an entry at the compressed
     * size; instead we stop reading an entry when a data descriptor is met (by
     * finding the Data Descriptor Signature). This will completely break down in some
     * cases - like JARs in WARs.
     * <p>
     * See also :
     * https://issues.apache.org/jira/projects/COMPRESS/issues/COMPRESS-555
     * https://github.com/apache/commons-compress/pull/137#issuecomment-690835644
     */
    private final boolean allowStoredEntriesWithDataDescriptor;

    /** Count decompressed bytes for current entry */
    private long uncompressedCount;

    /** Whether the stream will try to skip the ZIP split signature(08074B50) at the beginning **/
    private final boolean skipSplitSig;

    // cached buffers - must only be used locally in the class (COMPRESS-172 - reduce garbage collection)
    private final byte[] lfhBuf = new byte[LFH_LEN];

    private final byte[] skipBuf = new byte[1024];

    private final byte[] shortBuf = new byte[SHORT];

    private final byte[] wordBuf = new byte[WORD];

    private final byte[] twoDwordBuf = new byte[2 * DWORD];

    /** Number of entries handed out so far. */
    private int entriesRead;

    /**
     * Create an instance using UTF-8 encoding
     * @param inputStream the stream to wrap
     */
    public ZipArchiveInputStream(final InputStream inputStream) {
        this(inputStream, ZipEncodingHelper.UTF8);
    }

    /**
     * Create an instance using the specified encoding
     * @param inputStream the stream to wrap
     * @param encoding the encoding to use for file names, use null
     * for the platform's default encoding
     * @since 1.5
     */
    public ZipArchiveInputStream(final InputStream inputStream, final String encoding) {
        this(inputStream, encoding, true);
    }

    /**
     * Create an instance using the specified encoding
     * @param inputStream the stream to wrap
     * @param encoding the encoding to use for file names, use null
     * for the platform's default encoding
     * @param useUnicodeExtraFields whether to use InfoZIP Unicode
     * Extra Fields (if present) to set the file names.
     */
    public ZipArchiveInputStream(final InputStream inputStream, final String encoding, final boolean useUnicodeExtraFields) {
        this(inputStream, encoding, useUnicodeExtraFields, false);
    }

    /**
     * Create an instance using the specified encoding
     * @param inputStream the stream to wrap
     * @param encoding the encoding to use for file names, use null
     * for the platform's default encoding
     * @param useUnicodeExtraFields whether to use InfoZIP Unicode
     * Extra Fields (if present) to set the file names.
     * @param allowStoredEntriesWithDataDescriptor whether the stream
     * will try to read STORED entries that use a data descriptor
     * @since 1.1
     */
    public ZipArchiveInputStream(final InputStream inputStream,
                                 final String encoding,
                                 final boolean useUnicodeExtraFields,
                                 final boolean allowStoredEntriesWithDataDescriptor) {
        this(inputStream, encoding, useUnicodeExtraFields, allowStoredEntriesWithDataDescriptor, false);
    }

    /**
     * Create an instance using the specified encoding
     * @param inputStream the stream to wrap
     * @param encoding the encoding to use for file names, use null
     * for the platform's default encoding
     * @param useUnicodeExtraFields whether to use InfoZIP Unicode
     * Extra Fields (if present) to set the file names.
     * @param allowStoredEntriesWithDataDescriptor whether the stream
     * will try to read STORED entries that use a data descriptor
     * @param skipSplitSig Whether the stream will try to skip the zip
     * split signature(08074B50) at the beginning. You will need to
     * set this to true if you want to read a split archive.
425 * @since 1.20 426 */ 427 public ZipArchiveInputStream(final InputStream inputStream, 428 final String encoding, 429 final boolean useUnicodeExtraFields, 430 final boolean allowStoredEntriesWithDataDescriptor, 431 final boolean skipSplitSig) { 432 this.encoding = encoding; 433 zipEncoding = ZipEncodingHelper.getZipEncoding(encoding); 434 this.useUnicodeExtraFields = useUnicodeExtraFields; 435 this.inputStream = new PushbackInputStream(inputStream, buf.capacity()); 436 this.allowStoredEntriesWithDataDescriptor = allowStoredEntriesWithDataDescriptor; 437 this.skipSplitSig = skipSplitSig; 438 // haven't read anything so far 439 buf.limit(0); 440 } 441 442 /** 443 * Checks whether the current buffer contains the signature of a 444 * "data descriptor", "local file header" or 445 * "central directory entry". 446 * 447 * <p>If it contains such a signature, reads the data descriptor 448 * and positions the stream right after the data descriptor.</p> 449 */ 450 private boolean bufferContainsSignature(final ByteArrayOutputStream bos, final int offset, final int lastRead, final int expectedDDLen) 451 throws IOException { 452 453 boolean done = false; 454 for (int i = 0; !done && i < offset + lastRead - 4; i++) { 455 if (buf.array()[i] == LFH[0] && buf.array()[i + 1] == LFH[1]) { 456 int expectDDPos = i; 457 if (i >= expectedDDLen && 458 (buf.array()[i + 2] == LFH[2] && buf.array()[i + 3] == LFH[3]) 459 || (buf.array()[i + 2] == CFH[2] && buf.array()[i + 3] == CFH[3])) { 460 // found a LFH or CFH: 461 expectDDPos = i - expectedDDLen; 462 done = true; 463 } 464 else if (buf.array()[i + 2] == DD[2] && buf.array()[i + 3] == DD[3]) { 465 // found DD: 466 done = true; 467 } 468 if (done) { 469 // * push back bytes read in excess as well as the data 470 // descriptor 471 // * copy the remaining bytes to cache 472 // * read data descriptor 473 pushback(buf.array(), expectDDPos, offset + lastRead - expectDDPos); 474 bos.write(buf.array(), 0, expectDDPos); 475 readDataDescriptor(); 
476 } 477 } 478 } 479 return done; 480 } 481 482 /** 483 * If the last read bytes could hold a data descriptor and an 484 * incomplete signature then save the last bytes to the front of 485 * the buffer and cache everything in front of the potential data 486 * descriptor into the given ByteArrayOutputStream. 487 * 488 * <p>Data descriptor plus incomplete signature (3 bytes in the 489 * worst case) can be 20 bytes max.</p> 490 */ 491 private int cacheBytesRead(final ByteArrayOutputStream bos, int offset, final int lastRead, final int expecteDDLen) { 492 final int cacheable = offset + lastRead - expecteDDLen - 3; 493 if (cacheable > 0) { 494 bos.write(buf.array(), 0, cacheable); 495 System.arraycopy(buf.array(), cacheable, buf.array(), 0, expecteDDLen + 3); 496 offset = expecteDDLen + 3; 497 } else { 498 offset += lastRead; 499 } 500 return offset; 501 } 502 503 /** 504 * Whether this class is able to read the given entry. 505 * 506 * <p>May return false if it is set up to use encryption or a 507 * compression method that hasn't been implemented yet.</p> 508 * @since 1.1 509 */ 510 @Override 511 public boolean canReadEntryData(final ArchiveEntry ae) { 512 if (ae instanceof ZipArchiveEntry) { 513 final ZipArchiveEntry ze = (ZipArchiveEntry) ae; 514 return ZipUtil.canHandleEntryData(ze) 515 && supportsDataDescriptorFor(ze) 516 && supportsCompressedSizeFor(ze); 517 } 518 return false; 519 } 520 521 @Override 522 public void close() throws IOException { 523 if (!closed) { 524 closed = true; 525 try { 526 inputStream.close(); 527 } finally { 528 inf.end(); 529 } 530 } 531 } 532 533 /** 534 * Closes the current ZIP archive entry and positions the underlying 535 * stream to the beginning of the next entry. All per-entry variables 536 * and data structures are cleared. 537 * <p> 538 * If the compressed size of this entry is included in the entry header, 539 * then any outstanding bytes are simply skipped from the underlying 540 * stream without uncompressing them. 
This allows an entry to be safely 541 * closed even if the compression method is unsupported. 542 * <p> 543 * In case we don't know the compressed size of this entry or have 544 * already buffered too much data from the underlying stream to support 545 * uncompression, then the uncompression process is completed and the 546 * end position of the stream is adjusted based on the result of that 547 * process. 548 * 549 * @throws IOException if an error occurs 550 */ 551 private void closeEntry() throws IOException { 552 if (closed) { 553 throw new IOException("The stream is closed"); 554 } 555 if (current == null) { 556 return; 557 } 558 559 // Ensure all entry bytes are read 560 if (currentEntryHasOutstandingBytes()) { 561 drainCurrentEntryData(); 562 } else { 563 // this is guaranteed to exhaust the stream 564 skip(Long.MAX_VALUE); //NOSONAR 565 566 final long inB = current.entry.getMethod() == ZipArchiveOutputStream.DEFLATED 567 ? getBytesInflated() : current.bytesRead; 568 569 // this is at most a single read() operation and can't 570 // exceed the range of int 571 final int diff = (int) (current.bytesReadFromStream - inB); 572 573 // Pushback any required bytes 574 if (diff > 0) { 575 pushback(buf.array(), buf.limit() - diff, diff); 576 current.bytesReadFromStream -= diff; 577 } 578 579 // Drain remainder of entry if not all data bytes were required 580 if (currentEntryHasOutstandingBytes()) { 581 drainCurrentEntryData(); 582 } 583 } 584 585 if (lastStoredEntry == null && current.hasDataDescriptor) { 586 readDataDescriptor(); 587 } 588 589 inf.reset(); 590 buf.clear().flip(); 591 current = null; 592 lastStoredEntry = null; 593 } 594 595 /** 596 * If the compressed size of the current entry is included in the entry header 597 * and there are any outstanding bytes in the underlying stream, then 598 * this returns true. 
599 * 600 * @return true, if current entry is determined to have outstanding bytes, false otherwise 601 */ 602 private boolean currentEntryHasOutstandingBytes() { 603 return current.bytesReadFromStream <= current.entry.getCompressedSize() 604 && !current.hasDataDescriptor; 605 } 606 607 /** 608 * Read all data of the current entry from the underlying stream 609 * that hasn't been read, yet. 610 */ 611 private void drainCurrentEntryData() throws IOException { 612 long remaining = current.entry.getCompressedSize() - current.bytesReadFromStream; 613 while (remaining > 0) { 614 final long n = inputStream.read(buf.array(), 0, (int) Math.min(buf.capacity(), remaining)); 615 if (n < 0) { 616 throw new EOFException("Truncated ZIP entry: " 617 + ArchiveUtils.sanitize(current.entry.getName())); 618 } 619 count(n); 620 remaining -= n; 621 } 622 } 623 624 private int fill() throws IOException { 625 if (closed) { 626 throw new IOException("The stream is closed"); 627 } 628 final int length = inputStream.read(buf.array()); 629 if (length > 0) { 630 buf.limit(length); 631 count(buf.limit()); 632 inf.setInput(buf.array(), 0, buf.limit()); 633 } 634 return length; 635 } 636 637 /** 638 * Reads forward until the signature of the "End of central 639 * directory" record is found. 
     */
    private boolean findEocdRecord() throws IOException {
        int currentByte = -1;
        boolean skipReadCall = false;
        // match the four EOCD signature bytes one at a time; when a mismatch
        // is itself the first signature byte, reuse it instead of reading again
        while (skipReadCall || (currentByte = readOneByte()) > -1) {
            skipReadCall = false;
            if (!isFirstByteOfEocdSig(currentByte)) {
                continue;
            }
            currentByte = readOneByte();
            if (currentByte != ZipArchiveOutputStream.EOCD_SIG[1]) {
                if (currentByte == -1) {
                    break;
                }
                skipReadCall = isFirstByteOfEocdSig(currentByte);
                continue;
            }
            currentByte = readOneByte();
            if (currentByte != ZipArchiveOutputStream.EOCD_SIG[2]) {
                if (currentByte == -1) {
                    break;
                }
                skipReadCall = isFirstByteOfEocdSig(currentByte);
                continue;
            }
            currentByte = readOneByte();
            if (currentByte == -1) {
                break;
            }
            if (currentByte == ZipArchiveOutputStream.EOCD_SIG[3]) {
                return true;
            }
            skipReadCall = isFirstByteOfEocdSig(currentByte);
        }
        return false;
    }

    /**
     * Get the number of bytes Inflater has actually processed.
     *
     * <p>for Java &lt; Java7 the getBytes* methods in
     * Inflater/Deflater seem to return unsigned ints rather than
     * longs that start over with 0 at 2^32.</p>
     *
     * <p>The stream knows how many bytes it has read, but not how
     * many the Inflater actually consumed - it should be between the
     * total number of bytes read for the entry and the total number
     * minus the last read operation. Here we just try to make the
     * value close enough to the bytes we've read by assuming the
     * number of bytes consumed must be smaller than (or equal to) the
     * number of bytes read but not smaller by more than 2^32.</p>
     */
    private long getBytesInflated() {
        long inB = inf.getBytesRead();
        if (current.bytesReadFromStream >= TWO_EXP_32) {
            while (inB + TWO_EXP_32 <= current.bytesReadFromStream) {
                inB += TWO_EXP_32;
            }
        }
        return inB;
    }

    /**
     * @since 1.17
     */
    @SuppressWarnings("resource") // checkInputStream() does not allocate.
    @Override
    public long getCompressedCount() {
        final int method = current.entry.getMethod();
        if (method == ZipArchiveOutputStream.STORED) {
            return current.bytesRead;
        }
        if (method == ZipArchiveOutputStream.DEFLATED) {
            return getBytesInflated();
        }
        if (method == ZipMethod.UNSHRINKING.getCode()
            || method == ZipMethod.IMPLODING.getCode()
            || method == ZipMethod.ENHANCED_DEFLATED.getCode()
            || method == ZipMethod.BZIP2.getCode()) {
            // these methods wrap the raw stream in a decompressor that
            // implements InputStreamStatistics itself
            return ((InputStreamStatistics) current.checkInputStream()).getCompressedCount();
        }
        return -1;
    }

    @Override
    public ArchiveEntry getNextEntry() throws IOException {
        return getNextZipEntry();
    }

    /**
     * Reads the local file header of the next entry and positions the
     * stream at the start of its data.
     *
     * @return the next entry, or null once the central directory (or the
     *         end of the stream) has been reached
     * @throws IOException if the stream is corrupt or an I/O error occurs
     */
    public ZipArchiveEntry getNextZipEntry() throws IOException {
        uncompressedCount = 0;

        boolean firstEntry = true;
        if (closed || hitCentralDirectory) {
            return null;
        }
        if (current != null) {
            closeEntry();
            firstEntry = false;
        }

        final long currentHeaderOffset = getBytesRead();
        try {
            if (firstEntry) {
                // split archives have a special signature before the
                // first local file header - look for it and fail with
                // the appropriate error message if this is a split
                // archive.
                readFirstLocalFileHeader();
            } else {
                readFully(lfhBuf);
            }
        } catch (final EOFException e) { //NOSONAR
            // a clean end of stream between entries simply means "no more entries"
            return null;
        }

        final ZipLong sig = new ZipLong(lfhBuf);
        if (!sig.equals(ZipLong.LFH_SIG)) {
            // CFH/AED signatures or an APK signing block mean we are past
            // the last local file header - skip the rest of the archive
            if (sig.equals(ZipLong.CFH_SIG) || sig.equals(ZipLong.AED_SIG) || isApkSigningBlock(lfhBuf)) {
                hitCentralDirectory = true;
                skipRemainderOfArchive();
                return null;
            }
            throw new ZipException(String.format("Unexpected record signature: 0x%x", sig.getValue()));
        }

        int off = WORD;
        current = new CurrentEntry();

        final int versionMadeBy = ZipShort.getValue(lfhBuf, off);
        off += SHORT;
        current.entry.setPlatform((versionMadeBy >> ZipFile.BYTE_SHIFT) & ZipFile.NIBLET_MASK);

        final GeneralPurposeBit gpFlag = GeneralPurposeBit.parse(lfhBuf, off);
        final boolean hasUTF8Flag = gpFlag.usesUTF8ForNames();
        final ZipEncoding entryEncoding = hasUTF8Flag ? ZipEncodingHelper.UTF8_ZIP_ENCODING : zipEncoding;
        current.hasDataDescriptor = gpFlag.usesDataDescriptor();
        current.entry.setGeneralPurposeBit(gpFlag);

        off += SHORT;

        current.entry.setMethod(ZipShort.getValue(lfhBuf, off));
        off += SHORT;

        final long time = ZipUtil.dosToJavaTime(ZipLong.getValue(lfhBuf, off));
        current.entry.setTime(time);
        off += WORD;

        // with a data descriptor, CRC and sizes in the LFH are meaningless
        ZipLong size = null, cSize = null;
        if (!current.hasDataDescriptor) {
            current.entry.setCrc(ZipLong.getValue(lfhBuf, off));
            off += WORD;

            cSize = new ZipLong(lfhBuf, off);
            off += WORD;

            size = new ZipLong(lfhBuf, off);
            off += WORD;
        } else {
            off += 3 * WORD;
        }

        final int fileNameLen = ZipShort.getValue(lfhBuf, off);

        off += SHORT;

        final int extraLen = ZipShort.getValue(lfhBuf, off);
        off += SHORT; // NOSONAR - assignment as documentation

        final byte[] fileName = readRange(fileNameLen);
        current.entry.setName(entryEncoding.decode(fileName), fileName);
        if (hasUTF8Flag) {
            current.entry.setNameSource(ZipArchiveEntry.NameSource.NAME_WITH_EFS_FLAG);
        }

        final byte[] extraData = readRange(extraLen);
        try {
            current.entry.setExtra(extraData);
        } catch (final RuntimeException ex) {
            final ZipException z = new ZipException("Invalid extra data in entry " + current.entry.getName());
            z.initCause(ex);
            throw z;
        }

        if (!hasUTF8Flag && useUnicodeExtraFields) {
            ZipUtil.setNameAndCommentFromExtraFields(current.entry, fileName, null);
        }

        processZip64Extra(size, cSize);

        current.entry.setLocalHeaderOffset(currentHeaderOffset);
        current.entry.setDataOffset(getBytesRead());
        current.entry.setStreamContiguous(true);

        // STORED and DEFLATED are handled lazily in read(); the other
        // supported methods get a dedicated decompressing stream here
        final ZipMethod m = ZipMethod.getMethodByCode(current.entry.getMethod());
        if (current.entry.getCompressedSize() != ArchiveEntry.SIZE_UNKNOWN) {
            if (ZipUtil.canHandleEntryData(current.entry) && m != ZipMethod.STORED && m != ZipMethod.DEFLATED) {
                final InputStream bis = new BoundedInputStream(inputStream, current.entry.getCompressedSize());
                switch (m) {
                case UNSHRINKING:
                    current.inputStream = new UnshrinkingInputStream(bis);
                    break;
                case IMPLODING:
                    try {
                        current.inputStream = new ExplodingInputStream(
                            current.entry.getGeneralPurposeBit().getSlidingDictionarySize(),
                            current.entry.getGeneralPurposeBit().getNumberOfShannonFanoTrees(),
                            bis);
                    } catch (final IllegalArgumentException ex) {
                        throw new IOException("bad IMPLODE data", ex);
                    }
                    break;
                case BZIP2:
                    current.inputStream = new BZip2CompressorInputStream(bis);
                    break;
                case ENHANCED_DEFLATED:
                    current.inputStream = new Deflate64CompressorInputStream(bis);
                    break;
                default:
                    // we should never get here as all supported methods have been covered
                    // will cause an error when read is invoked, don't throw an exception here so people can
                    // skip unsupported entries
                    break;
                }
            }
        } else if (m == ZipMethod.ENHANCED_DEFLATED) {
            // ENHANCED_DEFLATED is self-terminating, so it can be read even
            // without a known compressed size
            current.inputStream = new Deflate64CompressorInputStream(inputStream);
        }

        entriesRead++;
        return current.entry;
    }

    /**
     * @since 1.17
     */
    @Override
    public long getUncompressedCount() {
        return uncompressedCount;
    }

    /**
     * Checks whether this might be an APK Signing Block.
     *
     * <p>Unfortunately the APK signing block does not start with some kind of signature, it rather ends with one. It
     * starts with a length, so what we do is parse the suspect length, skip ahead far enough, look for the signature
     * and if we've found it, return true.</p>
     *
     * @param suspectLocalFileHeader the bytes read from the underlying stream in the expectation that they would hold
     * the local file header of the next entry.
     *
     * @return true if this looks like a APK signing block
     *
     * @see <a href="https://source.android.com/security/apksigning/v2">https://source.android.com/security/apksigning/v2</a>
     */
    private boolean isApkSigningBlock(final byte[] suspectLocalFileHeader) throws IOException {
        // length of block excluding the size field itself
        final BigInteger len = ZipEightByteInteger.getValue(suspectLocalFileHeader);
        // LFH has already been read and all but the first eight bytes contain (part of) the APK signing block,
        // also subtract 16 bytes in order to position us at the magic string
        BigInteger toSkip = len.add(BigInteger.valueOf(DWORD - suspectLocalFileHeader.length
            - (long) APK_SIGNING_BLOCK_MAGIC.length));
        final byte[] magic = new byte[APK_SIGNING_BLOCK_MAGIC.length];

        try {
            if (toSkip.signum() < 0) {
                // suspectLocalFileHeader contains the start of suspect magic string
                final int off = suspectLocalFileHeader.length + toSkip.intValue();
                // length was shorter than magic length
                if (off < DWORD) {
                    return false;
                }
                final int bytesInBuffer = Math.abs(toSkip.intValue());
                System.arraycopy(suspectLocalFileHeader, off, magic, 0, Math.min(bytesInBuffer, magic.length));
                if (bytesInBuffer < magic.length) {
                    readFully(magic, bytesInBuffer);
                }
            } else {
                while (toSkip.compareTo(LONG_MAX) > 0) {
                    realSkip(Long.MAX_VALUE);
                    toSkip = toSkip.add(LONG_MAX.negate());
                }
                realSkip(toSkip.longValue());
                readFully(magic);
            }
        } catch (final EOFException ex) { //NOSONAR
            // length was invalid
            return false;
        }
        return Arrays.equals(magic, APK_SIGNING_BLOCK_MAGIC);
    }

    // true if b is the first byte of the "End of central directory" signature
    private boolean isFirstByteOfEocdSig(final int b) {
        return b == ZipArchiveOutputStream.EOCD_SIG[0];
    }

    /**
     * Records whether a Zip64 extra is present and sets the size
     * information from it if sizes are 0xFFFFFFFF and the entry
     * doesn't use a data descriptor.
     */
    private void processZip64Extra(final ZipLong size, final ZipLong cSize) throws ZipException {
        final ZipExtraField extra =
            current.entry.getExtraField(Zip64ExtendedInformationExtraField.HEADER_ID);
        if (extra != null && !(extra instanceof Zip64ExtendedInformationExtraField)) {
            throw new ZipException("archive contains unparseable zip64 extra field");
        }
        final Zip64ExtendedInformationExtraField z64 =
            (Zip64ExtendedInformationExtraField) extra;
        current.usesZip64 = z64 != null;
        if (!current.hasDataDescriptor) {
            if (z64 != null // same as current.usesZip64 but avoids NPE warning
                && (ZipLong.ZIP64_MAGIC.equals(cSize) || ZipLong.ZIP64_MAGIC.equals(size)) ) {
                if (z64.getCompressedSize() == null || z64.getSize() == null) {
                    // avoid NPE if it's a corrupted ZIP archive
                    throw new ZipException("archive contains corrupted zip64 extra field");
                }
                long s = z64.getCompressedSize().getLongValue();
                if (s < 0) {
                    throw new ZipException("broken archive, entry with negative compressed size");
                }
                current.entry.setCompressedSize(s);
                s = z64.getSize().getLongValue();
                if (s < 0) {
                    throw new ZipException("broken archive, entry with negative size");
                }
                current.entry.setSize(s);
            } else if (cSize != null && size != null) {
                if (cSize.getValue() < 0) {
                    throw new ZipException("broken archive, entry with negative compressed size");
                }
                current.entry.setCompressedSize(cSize.getValue());
                if (size.getValue() < 0) {
                    throw new ZipException("broken archive, entry with negative size");
                }
                current.entry.setSize(size.getValue());
            }
        }
    }

    // unreads the given bytes into the PushbackInputStream and adjusts the
    // stream's byte counter accordingly
    private void pushback(final byte[] buf, final int offset, final int length) throws IOException {
        ((PushbackInputStream) inputStream).unread(buf, offset, length);
        pushedBackBytes(length);
    }

    /**
     * Reads uncompressed data of the current entry, dispatching to the
     * decompressor matching the entry's method.
     */
    @Override
    public int read(final byte[] buffer, final int offset, final int length) throws IOException {
        if (length == 0) {
            return 0;
        }
        if (closed) {
            throw new IOException("The stream is closed");
        }

        if (current == null) {
            return -1;
        }

        // avoid int overflow, check null buffer
        if (offset > buffer.length || length < 0 || offset < 0 || buffer.length - offset < length) {
            throw new ArrayIndexOutOfBoundsException();
        }

        ZipUtil.checkRequestedFeatures(current.entry);
        if (!supportsDataDescriptorFor(current.entry)) {
            throw new UnsupportedZipFeatureException(UnsupportedZipFeatureException.Feature.DATA_DESCRIPTOR,
                current.entry);
        }
        if (!supportsCompressedSizeFor(current.entry)) {
            throw new UnsupportedZipFeatureException(UnsupportedZipFeatureException.Feature.UNKNOWN_COMPRESSED_SIZE,
                current.entry);
        }

        final int read;
        if (current.entry.getMethod() == ZipArchiveOutputStream.STORED) {
            read = readStored(buffer, offset, length);
        } else if (current.entry.getMethod() == ZipArchiveOutputStream.DEFLATED) {
            read = readDeflated(buffer, offset, length);
        } else if (current.entry.getMethod() == ZipMethod.UNSHRINKING.getCode()
            || current.entry.getMethod() == ZipMethod.IMPLODING.getCode()
            || current.entry.getMethod() == ZipMethod.ENHANCED_DEFLATED.getCode()
            || current.entry.getMethod() == ZipMethod.BZIP2.getCode()) {
            read = current.inputStream.read(buffer, offset, length);
        } else {
            throw new UnsupportedZipFeatureException(ZipMethod.getMethodByCode(current.entry.getMethod()),
                current.entry);
        }

        if (read >= 0) {
            current.crc.update(buffer, offset, read);
            uncompressedCount += read;
        }

        return read;
    }

    // Reads a data descriptor record, updating the current entry's CRC and
    // (below) its sizes.
    private void readDataDescriptor() throws IOException {
        readFully(wordBuf);
        ZipLong val = new ZipLong(wordBuf);
        if (ZipLong.DD_SIG.equals(val)) {
            // data descriptor with signature, skip sig
            readFully(wordBuf);
            val = new ZipLong(wordBuf);
        }
        current.entry.setCrc(val.getValue());

        // if there is a ZIP64 extra field, sizes are eight bytes
        // each, otherwise four bytes each.  Unfortunately some
        // implementations - namely Java7 - use eight bytes without
        // using a ZIP64 extra field -
        // https://bugs.sun.com/bugdatabase/view_bug.do?bug_id=7073588

        // just read 16 bytes and check whether bytes nine to twelve
        // look like one of the signatures of what could follow a data
        // descriptor (ignoring archive decryption headers for now).
        // If so, push back eight bytes and assume sizes are four
        // bytes, otherwise sizes are eight bytes each.
        readFully(twoDwordBuf);
        final ZipLong potentialSig = new ZipLong(twoDwordBuf, DWORD);
        if (potentialSig.equals(ZipLong.CFH_SIG) || potentialSig.equals(ZipLong.LFH_SIG)) {
            // bytes nine to twelve look like a following header, so
            // sizes are four bytes each - push back the over-read
            // eight bytes and parse the 32-bit values
            pushback(twoDwordBuf, DWORD, DWORD);
            long size = ZipLong.getValue(twoDwordBuf);
            if (size < 0) {
                throw new ZipException("broken archive, entry with negative compressed size");
            }
            current.entry.setCompressedSize(size);
            size = ZipLong.getValue(twoDwordBuf, WORD);
            if (size < 0) {
                throw new ZipException("broken archive, entry with negative size");
            }
            current.entry.setSize(size);
        } else {
            // no recognizable signature - assume sizes are eight bytes each
            long size = ZipEightByteInteger.getLongValue(twoDwordBuf);
            if (size < 0) {
                throw new ZipException("broken archive, entry with negative compressed size");
            }
            current.entry.setCompressedSize(size);
            size = ZipEightByteInteger.getLongValue(twoDwordBuf, DWORD);
            if (size < 0) {
                throw new ZipException("broken archive, entry with negative size");
            }
            current.entry.setSize(size);
        }
    }

    /**
     * Implementation of read for DEFLATED entries.
     */
    private int readDeflated(final byte[] buffer, final int offset, final int length) throws IOException {
        final int read = readFromInflater(buffer, offset, length);
        if (read <= 0) {
            if (inf.finished()) {
                return -1;
            }
            if (inf.needsDictionary()) {
                throw new ZipException("This archive needs a preset dictionary"
                    + " which is not supported by Commons"
                    + " Compress.");
            }
            if (read == -1) {
                // underlying stream ran dry before the deflated data ended
                throw new IOException("Truncated ZIP file");
            }
        }
        return read;
    }

    /**
     * Fills the given array with the first local file header and
     * deals with splitting/spanning markers that may prefix the first
     * LFH.
     */
    private void readFirstLocalFileHeader() throws IOException {
        readFully(lfhBuf);
        final ZipLong sig = new ZipLong(lfhBuf);

        // a data descriptor signature here marks a split archive,
        // which is unsupported unless skipSplitSig has been set
        if (!skipSplitSig && sig.equals(ZipLong.DD_SIG)) {
            throw new UnsupportedZipFeatureException(UnsupportedZipFeatureException.Feature.SPLITTING);
        }

        // the split ZIP signature(08074B50) should only be skipped when the skipSplitSig is set
        if (sig.equals(ZipLong.SINGLE_SEGMENT_SPLIT_MARKER) || sig.equals(ZipLong.DD_SIG)) {
            // Just skip over the marker: shift the buffered bytes
            // down by four and re-read the four LFH bytes the marker
            // displaced
            final byte[] missedLfhBytes = new byte[4];
            readFully(missedLfhBytes);
            System.arraycopy(lfhBuf, 4, lfhBuf, 0, LFH_LEN - 4);
            System.arraycopy(missedLfhBytes, 0, lfhBuf, LFH_LEN - 4, 4);
        }
    }

    /**
     * Potentially reads more bytes to fill the inflater's buffer and
     * reads from it.
     */
    private int readFromInflater(final byte[] buffer, final int offset, final int length) throws IOException {
        int read = 0;
        do {
            if (inf.needsInput()) {
                final int l = fill();
                if (l > 0) {
                    current.bytesReadFromStream += buf.limit();
                } else if (l == -1) {
                    // underlying stream is exhausted
                    return -1;
                } else {
                    // nothing refilled - let the caller decide what a
                    // zero-byte read means
                    break;
                }
            }
            try {
                read = inf.inflate(buffer, offset, length);
            } catch (final DataFormatException e) {
                throw (IOException) new ZipException(e.getMessage()).initCause(e);
            }
        } while (read == 0 && inf.needsInput());
        return read;
    }

    /**
     * Reads {@code b.length} bytes, throwing EOFException if the
     * stream ends prematurely.
     */
    private void readFully(final byte[] b) throws IOException {
        readFully(b, 0);
    }

    // End of Central Directory Record
    //   end of central dir signature    WORD
    //   number of this disk             SHORT
    //   number of the disk with the
    //   start of the central directory  SHORT
    //   total number of entries in the
    //   central directory on this disk  SHORT
    //   total number of entries in
    //   the central directory           SHORT
    //   size of the central directory   WORD
    //   offset of start of
central 1168 // directory with respect to 1169 // the starting disk number WORD 1170 // .ZIP file comment length SHORT 1171 // .ZIP file comment up to 64KB 1172 // 1173 1174 private void readFully(final byte[] b, final int off) throws IOException { 1175 final int len = b.length - off; 1176 final int count = IOUtils.readFully(inputStream, b, off, len); 1177 count(count); 1178 if (count < len) { 1179 throw new EOFException(); 1180 } 1181 } 1182 1183 /** 1184 * Reads bytes by reading from the underlying stream rather than 1185 * the (potentially inflating) archive stream - which {@link #read} would do. 1186 * 1187 * Also updates bytes-read counter. 1188 */ 1189 private int readOneByte() throws IOException { 1190 final int b = inputStream.read(); 1191 if (b != -1) { 1192 count(1); 1193 } 1194 return b; 1195 } 1196 1197 private byte[] readRange(final int len) throws IOException { 1198 final byte[] ret = IOUtils.readRange(inputStream, len); 1199 count(ret.length); 1200 if (ret.length < len) { 1201 throw new EOFException(); 1202 } 1203 return ret; 1204 } 1205 1206 /** 1207 * Implementation of read for STORED entries. 
     */
    private int readStored(final byte[] buffer, final int offset, final int length) throws IOException {

        // entries using a data descriptor have their data cached in
        // lastStoredEntry by readStoredEntry() first
        if (current.hasDataDescriptor) {
            if (lastStoredEntry == null) {
                readStoredEntry();
            }
            return lastStoredEntry.read(buffer, offset, length);
        }

        final long csize = current.entry.getSize();
        if (current.bytesRead >= csize) {
            return -1;
        }

        // refill the internal buffer when it has been fully consumed
        if (buf.position() >= buf.limit()) {
            buf.position(0);
            final int l = inputStream.read(buf.array());
            if (l == -1) {
                buf.limit(0);
                throw new IOException("Truncated ZIP file");
            }
            buf.limit(l);

            count(l);
            current.bytesReadFromStream += l;
        }

        int toRead = Math.min(buf.remaining(), length);
        if ((csize - current.bytesRead) < toRead) {
            // if it is smaller than toRead then it fits into an int
            toRead = (int) (csize - current.bytesRead);
        }
        buf.get(buffer, offset, toRead);
        current.bytesRead += toRead;
        return toRead;
    }

    /**
     * Caches a stored entry that uses the data descriptor.
     *
     * <ul>
     * <li>Reads a stored entry until the signature of a local file
     * header, central directory header or data descriptor has been
     * found.</li>
     * <li>Stores all entry data in lastStoredEntry.</li>
     * <li>Rewinds the stream to position at the data
     * descriptor.</li>
     * <li>reads the data descriptor</li>
     * </ul>
     *
     * <p>After calling this method the entry should know its size,
     * the entry's data is cached and the stream is positioned at the
     * next local file or central directory header.</p>
     */
    private void readStoredEntry() throws IOException {
        final ByteArrayOutputStream bos = new ByteArrayOutputStream();
        int off = 0;
        boolean done = false;

        // length of DD without signature
        final int ddLen = current.usesZip64 ? WORD + 2 * DWORD : 3 * WORD;

        while (!done) {
            final int r = inputStream.read(buf.array(), off, ZipArchiveOutputStream.BUFFER_SIZE - off);
            if (r <= 0) {
                // read the whole archive without ever finding a
                // central directory
                throw new IOException("Truncated ZIP file");
            }
            if (r + off < 4) {
                // buffer too small to check for a signature, loop
                off += r;
                continue;
            }

            done = bufferContainsSignature(bos, off, r, ddLen);
            if (!done) {
                off = cacheBytesRead(bos, off, r, ddLen);
            }
        }
        if (current.entry.getCompressedSize() != current.entry.getSize()) {
            throw new ZipException("compressed and uncompressed size don't match"
                + USE_ZIPFILE_INSTEAD_OF_STREAM_DISCLAIMER);
        }
        final byte[] b = bos.toByteArray();
        if (b.length != current.entry.getSize()) {
            throw new ZipException("actual and claimed size don't match"
                + USE_ZIPFILE_INSTEAD_OF_STREAM_DISCLAIMER);
        }
        lastStoredEntry = new ByteArrayInputStream(b);
    }

    /**
     * Skips bytes by reading from the underlying stream rather than
     * the (potentially inflating) archive stream - which {@link
     * #skip} would do.
     *
     * Also updates bytes-read counter.
     */
    private void realSkip(final long value) throws IOException {
        if (value >= 0) {
            long skipped = 0;
            while (skipped < value) {
                final long rem = value - skipped;
                final int x = inputStream.read(skipBuf, 0, (int) (skipBuf.length > rem ? rem : skipBuf.length));
                if (x == -1) {
                    // hit EOF before skipping the requested count -
                    // return silently, callers detect truncation later
                    return;
                }
                count(x);
                skipped += x;
            }
            return;
        }
        throw new IllegalArgumentException();
    }
    /**
     * Skips over and discards value bytes of data from this input
     * stream.
1327 * 1328 * <p>This implementation may end up skipping over some smaller 1329 * number of bytes, possibly 0, if and only if it reaches the end 1330 * of the underlying stream.</p> 1331 * 1332 * <p>The actual number of bytes skipped is returned.</p> 1333 * 1334 * @param value the number of bytes to be skipped. 1335 * @return the actual number of bytes skipped. 1336 * @throws IOException - if an I/O error occurs. 1337 * @throws IllegalArgumentException - if value is negative. 1338 */ 1339 @Override 1340 public long skip(final long value) throws IOException { 1341 if (value >= 0) { 1342 long skipped = 0; 1343 while (skipped < value) { 1344 final long rem = value - skipped; 1345 final int x = read(skipBuf, 0, (int) (skipBuf.length > rem ? rem : skipBuf.length)); 1346 if (x == -1) { 1347 return skipped; 1348 } 1349 skipped += x; 1350 } 1351 return skipped; 1352 } 1353 throw new IllegalArgumentException(); 1354 } 1355 1356 /** 1357 * Reads the stream until it find the "End of central directory 1358 * record" and consumes it as well. 1359 */ 1360 private void skipRemainderOfArchive() throws IOException { 1361 // skip over central directory. One LFH has been read too much 1362 // already. The calculation discounts file names and extra 1363 // data so it will be too short. 1364 if (entriesRead > 0) { 1365 realSkip((long) entriesRead * CFH_LEN - LFH_LEN); 1366 final boolean foundEocd = findEocdRecord(); 1367 if (foundEocd) { 1368 realSkip((long) ZipFile.MIN_EOCD_SIZE - WORD /* signature */ - SHORT /* comment len */); 1369 readFully(shortBuf); 1370 // file comment 1371 final int commentLen = ZipShort.getValue(shortBuf); 1372 if (commentLen >= 0) { 1373 realSkip(commentLen); 1374 return; 1375 } 1376 } 1377 } 1378 throw new IOException("Truncated ZIP file"); 1379 } 1380 1381 /** 1382 * Whether the compressed size for the entry is either known or 1383 * not required by the compression method being used. 
1384 */ 1385 private boolean supportsCompressedSizeFor(final ZipArchiveEntry entry) { 1386 return entry.getCompressedSize() != ArchiveEntry.SIZE_UNKNOWN 1387 || entry.getMethod() == ZipEntry.DEFLATED 1388 || entry.getMethod() == ZipMethod.ENHANCED_DEFLATED.getCode() 1389 || (entry.getGeneralPurposeBit().usesDataDescriptor() 1390 && allowStoredEntriesWithDataDescriptor 1391 && entry.getMethod() == ZipEntry.STORED); 1392 } 1393 1394 /** 1395 * Whether this entry requires a data descriptor this library can work with. 1396 * 1397 * @return true if allowStoredEntriesWithDataDescriptor is true, 1398 * the entry doesn't require any data descriptor or the method is 1399 * DEFLATED or ENHANCED_DEFLATED. 1400 */ 1401 private boolean supportsDataDescriptorFor(final ZipArchiveEntry entry) { 1402 return !entry.getGeneralPurposeBit().usesDataDescriptor() 1403 || (allowStoredEntriesWithDataDescriptor && entry.getMethod() == ZipEntry.STORED) 1404 || entry.getMethod() == ZipEntry.DEFLATED 1405 || entry.getMethod() == ZipMethod.ENHANCED_DEFLATED.getCode(); 1406 } 1407}