/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package org.apache.commons.compress.archivers.zip;

import static org.apache.commons.compress.archivers.zip.ZipConstants.DATA_DESCRIPTOR_MIN_VERSION;
import static org.apache.commons.compress.archivers.zip.ZipConstants.DEFLATE_MIN_VERSION;
import static org.apache.commons.compress.archivers.zip.ZipConstants.DWORD;
import static org.apache.commons.compress.archivers.zip.ZipConstants.INITIAL_VERSION;
import static org.apache.commons.compress.archivers.zip.ZipConstants.SHORT;
import static org.apache.commons.compress.archivers.zip.ZipConstants.WORD;
import static org.apache.commons.compress.archivers.zip.ZipConstants.ZIP64_MAGIC;
import static org.apache.commons.compress.archivers.zip.ZipConstants.ZIP64_MAGIC_SHORT;
import static org.apache.commons.compress.archivers.zip.ZipConstants.ZIP64_MIN_VERSION;
import static org.apache.commons.compress.archivers.zip.ZipLong.putLong;
import static org.apache.commons.compress.archivers.zip.ZipShort.putShort;

import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.channels.SeekableByteChannel;
import java.nio.file.Files;
import java.nio.file.LinkOption;
import java.nio.file.OpenOption;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.Calendar;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.zip.Deflater;
import java.util.zip.ZipException;

import org.apache.commons.compress.archivers.ArchiveEntry;
import org.apache.commons.compress.archivers.ArchiveOutputStream;
import org.apache.commons.compress.utils.ByteUtils;
import org.apache.commons.compress.utils.IOUtils;

/**
 * Reimplementation of {@link java.util.zip.ZipOutputStream
 * java.util.zip.ZipOutputStream} that handles the extended
 * functionality of this package, especially internal/external file
 * attributes and extra fields with different layouts for local file
 * data and central directory entries.
 *
 * <p>This class will try to use {@link
 * java.nio.channels.SeekableByteChannel} when it knows that the
 * output is going to go to a file and no split archive shall be
 * created.</p>
 *
 * <p>If SeekableByteChannel cannot be used, this implementation will use
 * a Data Descriptor to store size and CRC information for {@link
 * #DEFLATED DEFLATED} entries; this means you don't need to
 * calculate them yourself.  Unfortunately this is not possible for
 * the {@link #STORED STORED} method, where setting the CRC and
 * uncompressed size information is required before {@link
 * #putArchiveEntry(ArchiveEntry)} can be called.</p>
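 *
 * <p>A minimal usage sketch; {@code target} stands for any
 * {@code java.nio.file.Path} chosen by the caller:</p>
 *
 * <pre>{@code
 * try (ZipArchiveOutputStream zos =
 *          new ZipArchiveOutputStream(Files.newOutputStream(target))) {
 *     ZipArchiveEntry entry = new ZipArchiveEntry("hello.txt");
 *     zos.putArchiveEntry(entry);
 *     zos.write("Hello, world".getBytes(StandardCharsets.UTF_8));
 *     zos.closeArchiveEntry();
 * }
 * }</pre>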
 *
 * <p>As of Apache Commons Compress 1.3 it transparently supports Zip64
 * extensions and thus individual entries and archives larger than 4
 * GB or with more than 65536 entries in most cases but explicit
 * control is provided via {@link #setUseZip64}. If the stream cannot
 * use SeekableByteChannel and you try to write a ZipArchiveEntry of
 * unknown size then Zip64 extensions will be disabled by default.</p>
 *
 * @NotThreadSafe
 */
public class ZipArchiveOutputStream extends ArchiveOutputStream {

    static final int BUFFER_SIZE = 512;
    private static final int LFH_SIG_OFFSET = 0;
    private static final int LFH_VERSION_NEEDED_OFFSET = 4;
    private static final int LFH_GPB_OFFSET = 6;
    private static final int LFH_METHOD_OFFSET = 8;
    private static final int LFH_TIME_OFFSET = 10;
    private static final int LFH_CRC_OFFSET = 14;
    private static final int LFH_COMPRESSED_SIZE_OFFSET = 18;
    private static final int LFH_ORIGINAL_SIZE_OFFSET = 22;
    private static final int LFH_FILENAME_LENGTH_OFFSET = 26;
    private static final int LFH_EXTRA_LENGTH_OFFSET = 28;
    private static final int LFH_FILENAME_OFFSET = 30;
    private static final int CFH_SIG_OFFSET = 0;
    private static final int CFH_VERSION_MADE_BY_OFFSET = 4;
    private static final int CFH_VERSION_NEEDED_OFFSET = 6;
    private static final int CFH_GPB_OFFSET = 8;
    private static final int CFH_METHOD_OFFSET = 10;
    private static final int CFH_TIME_OFFSET = 12;
    private static final int CFH_CRC_OFFSET = 16;
    private static final int CFH_COMPRESSED_SIZE_OFFSET = 20;
    private static final int CFH_ORIGINAL_SIZE_OFFSET = 24;
    private static final int CFH_FILENAME_LENGTH_OFFSET = 28;
    private static final int CFH_EXTRA_LENGTH_OFFSET = 30;
    private static final int CFH_COMMENT_LENGTH_OFFSET = 32;
    private static final int CFH_DISK_NUMBER_OFFSET = 34;
    private static final int CFH_INTERNAL_ATTRIBUTES_OFFSET = 36;
    private static final int CFH_EXTERNAL_ATTRIBUTES_OFFSET = 38;
    private static final int CFH_LFH_OFFSET = 42;
    private static final int CFH_FILENAME_OFFSET = 46;

    /** Indicates if this archive is finished. Protected for use in Jar implementation. */
    protected boolean finished;

    /**
     * Compression method for deflated entries.
     */
    public static final int DEFLATED = java.util.zip.ZipEntry.DEFLATED;

    /**
     * Default compression level for deflated entries.
     */
    public static final int DEFAULT_COMPRESSION = Deflater.DEFAULT_COMPRESSION;

    /**
     * Compression method for stored entries.
     */
    public static final int STORED = java.util.zip.ZipEntry.STORED;

    /**
     * Default encoding for file names and comment.
     */
    static final String DEFAULT_ENCODING = ZipEncodingHelper.UTF8;

    /**
     * General purpose flag, which indicates that file names are
     * written in UTF-8.
     * @deprecated use {@link GeneralPurposeBit#UFT8_NAMES_FLAG} instead
     */
    @Deprecated
    public static final int EFS_FLAG = GeneralPurposeBit.UFT8_NAMES_FLAG;

    /**
     * Current entry.
     */
    private CurrentEntry entry;

    /**
     * The file comment.
     */
    private String comment = "";

    /**
     * Compression level for next entry.
     */
    private int level = DEFAULT_COMPRESSION;

    /**
     * Has the compression level changed when compared to the last
     * entry?
     */
    private boolean hasCompressionLevelChanged;

    /**
     * Default compression method for next entry.
     */
    private int method = java.util.zip.ZipEntry.DEFLATED;

    /**
     * List of ZipArchiveEntries written so far.
     */
    private final List<ZipArchiveEntry> entries =
        new LinkedList<>();

    private final StreamCompressor streamCompressor;

    /**
     * Start of central directory.
     */
    private long cdOffset;

    /**
     * Length of central directory.
     */
    private long cdLength;

    /**
     * Disk number start of central directory.
     */
    private long cdDiskNumberStart;

    /**
     * Length of end of central directory.
     */
    private long eocdLength;

    /**
     * Helper, a 0 as ZipShort.
     */
    private static final byte[] ZERO = {0, 0};

    /**
     * Helper, a 0 as ZipLong.
     */
    private static final byte[] LZERO = {0, 0, 0, 0};

    private static final byte[] ONE = ZipLong.getBytes(1L);

    /**
     * Holds some book-keeping data for each entry.
     */
    private final Map<ZipArchiveEntry, EntryMetaData> metaData =
        new HashMap<>();

    /**
     * The encoding to use for file names and the file comment.
     *
     * <p>For a list of possible values see <a
     * href="http://java.sun.com/j2se/1.5.0/docs/guide/intl/encoding.doc.html">http://java.sun.com/j2se/1.5.0/docs/guide/intl/encoding.doc.html</a>.
     * Defaults to UTF-8.</p>
     */
    private String encoding = DEFAULT_ENCODING;

    /**
     * The zip encoding to use for file names and the file comment.
     *
     * This field is of internal use and will be set in {@link
     * #setEncoding(String)}.
     */
    private ZipEncoding zipEncoding =
        ZipEncodingHelper.getZipEncoding(DEFAULT_ENCODING);


    /**
     * This Deflater object is used for output.
     *
     */
    protected final Deflater def;

    /**
     * Optional random access output.
     */
    private final SeekableByteChannel channel;

    private final OutputStream outputStream;

    /**
     * Whether to use the general purpose bit flag when writing UTF-8
     * file names or not.
     */
    private boolean useUTF8Flag = true;

    /**
     * Whether to encode non-encodable file names as UTF-8.
     */
    private boolean fallbackToUTF8;

    /**
     * Whether to create UnicodePathExtraField-s for each entry.
     */
    private UnicodeExtraFieldPolicy createUnicodeExtraFields = UnicodeExtraFieldPolicy.NEVER;

    /**
     * Whether anything inside this archive has used a ZIP64 feature.
     *
     * @since 1.3
     */
    private boolean hasUsedZip64;

    private Zip64Mode zip64Mode = Zip64Mode.AsNeeded;

    private final byte[] copyBuffer = new byte[32768];
    private final Calendar calendarInstance = Calendar.getInstance();

    /**
     * Whether we are creating a split zip.
     */
    private final boolean isSplitZip;

    /**
     * Holds the number of Central Directories on each disk. This is used
     * when writing the Zip64 End Of Central Directory and End Of Central Directory.
     */
    private final Map<Integer, Integer> numberOfCDInDiskData = new HashMap<>();

    /**
     * Creates a new ZIP OutputStream filtering the underlying stream.
     * @param out the output stream to zip
     */
    public ZipArchiveOutputStream(final OutputStream out) {
        this.outputStream = out;
        this.channel = null;
        def = new Deflater(level, true);
        streamCompressor = StreamCompressor.create(out, def);
        isSplitZip = false;
    }

    /**
     * Creates a new ZIP OutputStream writing to a File.  Will use
     * random access if possible.
     * @param file the file to zip to
     * @throws IOException on error
     */
    public ZipArchiveOutputStream(final File file) throws IOException {
        this(file.toPath());
    }

    /**
     * Creates a new ZIP OutputStream writing to a Path.  Will use
     * random access if possible.
     * @param file the file to zip to
     * @param options options specifying how the file is opened.
     * @throws IOException on error
     * @since 1.21
     */
    public ZipArchiveOutputStream(final Path file, final OpenOption... options) throws IOException {
        def = new Deflater(level, true);
        OutputStream outputStream = null;
        SeekableByteChannel channel = null;
        StreamCompressor streamCompressor = null;
        try {
            channel = Files.newByteChannel(file,
                EnumSet.of(StandardOpenOption.CREATE, StandardOpenOption.WRITE,
                           StandardOpenOption.READ,
                           StandardOpenOption.TRUNCATE_EXISTING));
            // will never get opened properly when an exception is thrown so doesn't need to get closed
            streamCompressor = StreamCompressor.create(channel, def); //NOSONAR
        } catch (final IOException e) { // NOSONAR
            IOUtils.closeQuietly(channel);
            channel = null;
            outputStream = Files.newOutputStream(file, options);
            streamCompressor = StreamCompressor.create(outputStream, def);
        }
        this.outputStream = outputStream;
        this.channel = channel;
        this.streamCompressor = streamCompressor;
        this.isSplitZip = false;
    }

    /**
     * Creates a split ZIP Archive.
     *
     * <p>The files making up the archive will use Z01, Z02,
     * ... extensions and the last part of it will be the given {@code
     * file}.</p>
     *
     * <p>Even though the stream writes to a file this stream will
     * behave as if no random access was possible. This means the
     * sizes of stored entries need to be known before the actual
     * entry data is written.</p>
     *
     * @param file the file that will become the last part of the split archive
     * @param zipSplitSize maximum size of a single part of the split
     * archive created by this stream. Must be between 64kB and about
     * 4GB.
     *
     * @throws IOException on error
     * @throws IllegalArgumentException if zipSplitSize is not in the required range
     * @since 1.20
     */
    public ZipArchiveOutputStream(final File file, final long zipSplitSize) throws IOException {
        this(file.toPath(), zipSplitSize);
    }
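
    // Illustrative sketch only (file name and part size are example values):
    // creating a split archive whose parts are at most 100 MiB each could
    // look like
    //
    //     try (ZipArchiveOutputStream zos =
    //             new ZipArchiveOutputStream(new File("archive.zip"), 100L * 1024 * 1024)) {
    //         // put entries as usual; archive.z01, archive.z02, ... are created as needed
    //     }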

    /**
     * Creates a split ZIP Archive.
     * <p>The files making up the archive will use Z01, Z02,
     * ... extensions and the last part of it will be the given {@code
     * file}.</p>
     * <p>Even though the stream writes to a file this stream will
     * behave as if no random access was possible. This means the
     * sizes of stored entries need to be known before the actual
     * entry data is written.</p>
     * @param path the path to the file that will become the last part of the split archive
     * @param zipSplitSize maximum size of a single part of the split
     * archive created by this stream. Must be between 64kB and about 4GB.
     * @throws IOException on error
     * @throws IllegalArgumentException if zipSplitSize is not in the required range
     * @since 1.22
     */
    public ZipArchiveOutputStream(final Path path, final long zipSplitSize) throws IOException {
        def = new Deflater(level, true);
        this.outputStream = new ZipSplitOutputStream(path, zipSplitSize);
        streamCompressor = StreamCompressor.create(this.outputStream, def);
        channel = null;
        isSplitZip = true;
    }

    /**
     * Creates a new ZIP OutputStream writing to a SeekableByteChannel.
     *
     * <p>{@link
     * org.apache.commons.compress.utils.SeekableInMemoryByteChannel}
     * allows you to write to an in-memory archive using random
     * access.</p>
     *
     * @param channel the channel to zip to
     * @since 1.13
     */
    public ZipArchiveOutputStream(final SeekableByteChannel channel) {
        this.channel = channel;
        def = new Deflater(level, true);
        streamCompressor = StreamCompressor.create(channel, def);
        outputStream = null;
        isSplitZip = false;
    }

    /**
     * This method indicates whether this archive is writing to a
     * seekable stream (i.e., to a random access file).
     *
     * <p>For seekable streams, you don't need to calculate the CRC or
     * uncompressed size for {@link #STORED} entries before
     * invoking {@link #putArchiveEntry(ArchiveEntry)}.</p>
     * @return true if seekable
     */
    public boolean isSeekable() {
        return channel != null;
    }

    /**
     * The encoding to use for file names and the file comment.
     *
     * <p>For a list of possible values see <a
     * href="http://java.sun.com/j2se/1.5.0/docs/guide/intl/encoding.doc.html">http://java.sun.com/j2se/1.5.0/docs/guide/intl/encoding.doc.html</a>.
     * Defaults to UTF-8.</p>
     * @param encoding the encoding to use for file names, use null
     * for the platform's default encoding
     */
    public void setEncoding(final String encoding) {
        this.encoding = encoding;
        this.zipEncoding = ZipEncodingHelper.getZipEncoding(encoding);
        if (useUTF8Flag && !ZipEncodingHelper.isUTF8(encoding)) {
            useUTF8Flag = false;
        }
    }

    /**
     * The encoding to use for file names and the file comment.
     *
     * @return null if using the platform's default character encoding.
     */
    public String getEncoding() {
        return encoding;
    }

    /**
     * Whether to set the language encoding flag if the file name
     * encoding is UTF-8.
     *
     * <p>Defaults to true.</p>
     *
     * @param b whether to set the language encoding flag if the file
     * name encoding is UTF-8
     */
    public void setUseLanguageEncodingFlag(final boolean b) {
        useUTF8Flag = b && ZipEncodingHelper.isUTF8(encoding);
    }

    /**
     * Whether to create Unicode Extra Fields.
     *
     * <p>Defaults to NEVER.</p>
     *
     * @param b whether to create Unicode Extra Fields.
     */
    public void setCreateUnicodeExtraFields(final UnicodeExtraFieldPolicy b) {
        createUnicodeExtraFields = b;
    }

    /**
     * Whether to fall back to UTF-8 and the language encoding flag if
     * the file name cannot be encoded using the specified encoding.
     *
     * <p>Defaults to false.</p>
     *
     * @param b whether to fall back to UTF-8 and the language encoding
     * flag if the file name cannot be encoded using the specified
     * encoding.
     */
    public void setFallbackToUTF8(final boolean b) {
        fallbackToUTF8 = b;
    }
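
    // Illustrative configuration sketch (the encoding value is only an
    // example): a caller that must produce archives readable by legacy
    // CP437-only tools might call
    //
    //     zipOut.setEncoding("CP437");
    //     zipOut.setFallbackToUTF8(true);
    //     zipOut.setCreateUnicodeExtraFields(UnicodeExtraFieldPolicy.NOT_ENCODEABLE);
    //
    // before the first putArchiveEntry invocation.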

    /**
     * Whether Zip64 extensions will be used.
     *
     * <p>When setting the mode to {@link Zip64Mode#Never Never},
     * {@link #putArchiveEntry}, {@link #closeArchiveEntry}, {@link
     * #finish} or {@link #close} may throw a {@link
     * Zip64RequiredException} if the entry's size or the total size
     * of the archive exceeds 4GB or there are more than 65536 entries
     * inside the archive. Any archive created in this mode will be
     * readable by implementations that don't support Zip64.</p>
     *
     * <p>When setting the mode to {@link Zip64Mode#Always Always},
     * Zip64 extensions will be used for all entries. Any archive
     * created in this mode may be unreadable by implementations that
     * don't support Zip64 even if all its contents would be.</p>
     *
     * <p>When setting the mode to {@link Zip64Mode#AsNeeded
     * AsNeeded}, Zip64 extensions will transparently be used for
     * those entries that require them. This mode can only be used if
     * the uncompressed size of the {@link ZipArchiveEntry} is known
     * when calling {@link #putArchiveEntry} or the archive is written
     * to a seekable output (i.e. you have used the {@link
     * #ZipArchiveOutputStream(java.io.File) File-arg constructor}) -
     * this mode is not valid when the output stream is not seekable
     * and the uncompressed size is unknown when {@link
     * #putArchiveEntry} is called.</p>
     *
     * <p>If no entry inside the resulting archive requires Zip64
     * extensions then {@link Zip64Mode#Never Never} will create the
     * smallest archive. {@link Zip64Mode#AsNeeded AsNeeded} will
     * create a slightly bigger archive if the uncompressed size of
     * any entry has initially been unknown and create an archive
     * identical to {@link Zip64Mode#Never Never} otherwise. {@link
     * Zip64Mode#Always Always} will create an archive that is at
     * least 24 bytes per entry bigger than the one {@link
     * Zip64Mode#Never Never} would create.</p>
     *
     * <p>Defaults to {@link Zip64Mode#AsNeeded AsNeeded} unless
     * {@link #putArchiveEntry} is called with an entry of unknown
     * size and data is written to a non-seekable stream - in this
     * case the default is {@link Zip64Mode#Never Never}.</p>
     *
     * @since 1.3
     * @param mode Whether Zip64 extensions will be used.
     */
    public void setUseZip64(final Zip64Mode mode) {
        zip64Mode = mode;
    }

    /**
     * Returns the total number of bytes written to this stream.
     * @return the number of written bytes
     * @since 1.22
     */
    @Override
    public long getBytesWritten() {
        return streamCompressor.getTotalBytesWritten();
    }

    /**
     * {@inheritDoc}
     * @throws Zip64RequiredException if the archive's size exceeds 4
     * GByte or there are more than 65535 entries inside the archive
     * and {@link #setUseZip64} is {@link Zip64Mode#Never}.
     */
    @Override
    public void finish() throws IOException {
        if (finished) {
            throw new IOException("This archive has already been finished");
        }

        if (entry != null) {
            throw new IOException("This archive contains unclosed entries.");
        }

        final long cdOverallOffset = streamCompressor.getTotalBytesWritten();
        cdOffset = cdOverallOffset;
        if (isSplitZip) {
            // when creating a split zip, the offset should be
            // the offset to the corresponding segment disk
            final ZipSplitOutputStream zipSplitOutputStream = (ZipSplitOutputStream) this.outputStream;
            cdOffset = zipSplitOutputStream.getCurrentSplitSegmentBytesWritten();
            cdDiskNumberStart = zipSplitOutputStream.getCurrentSplitSegmentIndex();
        }
        writeCentralDirectoryInChunks();

        cdLength = streamCompressor.getTotalBytesWritten() - cdOverallOffset;

        // calculate the length of end of central directory, as it may be used in writeZip64CentralDirectory
        final ByteBuffer commentData = this.zipEncoding.encode(comment);
        final long commentLength = (long) commentData.limit() - commentData.position();
        eocdLength = WORD /* length of EOCD_SIG */
            + SHORT /* number of this disk */
            + SHORT /* disk number of start of central directory */
            + SHORT /* total number of entries on this disk */
            + SHORT /* total number of entries */
            + WORD /* size of central directory */
            + WORD /* offset of start of central directory */
            + SHORT /* zip comment length */
            + commentLength /* zip comment */;

        writeZip64CentralDirectory();
        writeCentralDirectoryEnd();
        metaData.clear();
        entries.clear();
        streamCompressor.close();
        if (isSplitZip) {
            // trigger the ZipSplitOutputStream to write the final split segment
            outputStream.close();
        }
        finished = true;
    }

    private void writeCentralDirectoryInChunks() throws IOException {
        final int NUM_PER_WRITE = 1000;
        final ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(70 * NUM_PER_WRITE);
        int count = 0;
        for (final ZipArchiveEntry ze : entries) {
            byteArrayOutputStream.write(createCentralFileHeader(ze));
            if (++count > NUM_PER_WRITE) {
                writeCounted(byteArrayOutputStream.toByteArray());
                byteArrayOutputStream.reset();
                count = 0;
            }
        }
        writeCounted(byteArrayOutputStream.toByteArray());
    }

    /**
     * Writes all necessary data for this entry.
     * @throws IOException on error
     * @throws Zip64RequiredException if the entry's uncompressed or
     * compressed size exceeds 4 GByte and {@link #setUseZip64}
     * is {@link Zip64Mode#Never}.
     */
    @Override
    public void closeArchiveEntry() throws IOException {
        preClose();

        flushDeflater();

        final long bytesWritten = streamCompressor.getTotalBytesWritten() - entry.dataStart;
        final long realCrc = streamCompressor.getCrc32();
        entry.bytesRead = streamCompressor.getBytesRead();
        final Zip64Mode effectiveMode = getEffectiveZip64Mode(entry.entry);
        final boolean actuallyNeedsZip64 = handleSizesAndCrc(bytesWritten, realCrc, effectiveMode);
        closeEntry(actuallyNeedsZip64, false);
        streamCompressor.reset();
    }

    /**
     * Writes all necessary data for this entry.
     *
     * @param phased This entry is the second phase of a 2-phase zip creation; size, compressed size and crc
     * are known in the ZipArchiveEntry
     * @throws IOException on error
     * @throws Zip64RequiredException if the entry's uncompressed or
     * compressed size exceeds 4 GByte and {@link #setUseZip64}
     * is {@link Zip64Mode#Never}.
     */
    private void closeCopiedEntry(final boolean phased) throws IOException {
        preClose();
        entry.bytesRead = entry.entry.getSize();
        final Zip64Mode effectiveMode = getEffectiveZip64Mode(entry.entry);
        final boolean actuallyNeedsZip64 = checkIfNeedsZip64(effectiveMode);
        closeEntry(actuallyNeedsZip64, phased);
    }

    private void closeEntry(final boolean actuallyNeedsZip64, final boolean phased) throws IOException {
        if (!phased && channel != null) {
            rewriteSizesAndCrc(actuallyNeedsZip64);
        }

        if (!phased) {
            writeDataDescriptor(entry.entry);
        }
        entry = null;
    }

    private void preClose() throws IOException {
        if (finished) {
            throw new IOException("Stream has already been finished");
        }

        if (entry == null) {
            throw new IOException("No current entry to close");
        }

        if (!entry.hasWritten) {
            write(ByteUtils.EMPTY_BYTE_ARRAY, 0, 0);
        }
    }

    /**
     * Adds an archive entry with a raw input stream.
     *
     * If crc, size and compressed size are supplied on the entry, these values will be used as-is.
     * Zip64 status is re-established based on the settings in this stream, and the supplied value
     * is ignored.
     *
     * The entry is put and closed immediately.
     *
     * @param entry The archive entry to add
     * @param rawStream The raw input stream of a different entry. May be compressed/encrypted.
     * @throws IOException If copying fails
     */
    public void addRawArchiveEntry(final ZipArchiveEntry entry, final InputStream rawStream)
        throws IOException {
        final ZipArchiveEntry ae = new ZipArchiveEntry(entry);
        if (hasZip64Extra(ae)) {
            // Will be re-added as required. This may make the file generated with this method
            // somewhat smaller than standard mode,
            // since standard mode is unable to remove the zip 64 header.
            ae.removeExtraField(Zip64ExtendedInformationExtraField.HEADER_ID);
        }
        final boolean is2PhaseSource = ae.getCrc() != ZipArchiveEntry.CRC_UNKNOWN
            && ae.getSize() != ArchiveEntry.SIZE_UNKNOWN
            && ae.getCompressedSize() != ArchiveEntry.SIZE_UNKNOWN;
        putArchiveEntry(ae, is2PhaseSource);
        copyFromZipInputStream(rawStream);
        closeCopiedEntry(is2PhaseSource);
    }
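
    // Illustrative sketch only (assumes a ZipFile named "source" and this
    // stream named "target", both managed by the caller): entries can be
    // copied between archives without recompression via
    //
    //     for (ZipArchiveEntry e : Collections.list(source.getEntries())) {
    //         target.addRawArchiveEntry(e, source.getRawInputStream(e));
    //     }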

    /**
     * Ensures all bytes sent to the deflater are written to the stream.
     */
    private void flushDeflater() throws IOException {
        if (entry.entry.getMethod() == DEFLATED) {
            streamCompressor.flushDeflater();
        }
    }

    /**
     * Ensures the current entry's size and CRC information is set to
     * the values just written, verifies it isn't too big in the
     * Zip64Mode.Never case and returns whether the entry would
     * require a Zip64 extra field.
     */
    private boolean handleSizesAndCrc(final long bytesWritten, final long crc,
                                      final Zip64Mode effectiveMode)
        throws ZipException {
        if (entry.entry.getMethod() == DEFLATED) {
            /* It turns out def.getBytesRead() returns wrong values if
             * the size exceeds 4 GB on Java < Java7
            entry.entry.setSize(def.getBytesRead());
            */
            entry.entry.setSize(entry.bytesRead);
            entry.entry.setCompressedSize(bytesWritten);
            entry.entry.setCrc(crc);

        } else if (channel == null) {
            if (entry.entry.getCrc() != crc) {
                throw new ZipException("Bad CRC checksum for entry "
                                       + entry.entry.getName() + ": "
                                       + Long.toHexString(entry.entry.getCrc())
                                       + " instead of "
                                       + Long.toHexString(crc));
            }

            if (entry.entry.getSize() != bytesWritten) {
                throw new ZipException("Bad size for entry "
                                       + entry.entry.getName() + ": "
                                       + entry.entry.getSize()
                                       + " instead of "
                                       + bytesWritten);
            }
        } else { /* method is STORED and we used SeekableByteChannel */
            entry.entry.setSize(bytesWritten);
            entry.entry.setCompressedSize(bytesWritten);
            entry.entry.setCrc(crc);
        }

        return checkIfNeedsZip64(effectiveMode);
    }

    /**
     * Verifies the sizes aren't too big in the Zip64Mode.Never case
     * and returns whether the entry would require a Zip64 extra
     * field.
     */
    private boolean checkIfNeedsZip64(final Zip64Mode effectiveMode)
        throws ZipException {
        final boolean actuallyNeedsZip64 = isZip64Required(entry.entry, effectiveMode);
        if (actuallyNeedsZip64 && effectiveMode == Zip64Mode.Never) {
            throw new Zip64RequiredException(Zip64RequiredException.getEntryTooBigMessage(entry.entry));
        }
        return actuallyNeedsZip64;
    }

    private boolean isZip64Required(final ZipArchiveEntry entry1, final Zip64Mode requestedMode) {
        return requestedMode == Zip64Mode.Always || requestedMode == Zip64Mode.AlwaysWithCompatibility
            || isTooLargeForZip32(entry1);
    }

    private boolean isTooLargeForZip32(final ZipArchiveEntry zipArchiveEntry) {
        return zipArchiveEntry.getSize() >= ZIP64_MAGIC || zipArchiveEntry.getCompressedSize() >= ZIP64_MAGIC;
    }

    /**
     * When using random access output, write the local file header
     * and potentially the ZIP64 extra containing the correct CRC and
     * compressed/uncompressed sizes.
     */
    private void rewriteSizesAndCrc(final boolean actuallyNeedsZip64)
        throws IOException {
        final long save = channel.position();

        channel.position(entry.localDataStart);
        writeOut(ZipLong.getBytes(entry.entry.getCrc()));
        if (!hasZip64Extra(entry.entry) || !actuallyNeedsZip64) {
            writeOut(ZipLong.getBytes(entry.entry.getCompressedSize()));
            writeOut(ZipLong.getBytes(entry.entry.getSize()));
        } else {
            writeOut(ZipLong.ZIP64_MAGIC.getBytes());
            writeOut(ZipLong.ZIP64_MAGIC.getBytes());
        }

        if (hasZip64Extra(entry.entry)) {
            final ByteBuffer name = getName(entry.entry);
            final int nameLen = name.limit() - name.position();
            // seek to ZIP64 extra, skip header and size information
            channel.position(entry.localDataStart + 3 * WORD + 2 * SHORT
                             + nameLen + 2 * SHORT);
            // inside the ZIP64 extra uncompressed size comes
            // first, unlike the LFH, CD or data descriptor
            writeOut(ZipEightByteInteger.getBytes(entry.entry.getSize()));
            writeOut(ZipEightByteInteger.getBytes(entry.entry.getCompressedSize()));

            if (!actuallyNeedsZip64) {
                // do some cleanup:
                // * rewrite version needed to extract
                channel.position(entry.localDataStart - 5 * SHORT);
                writeOut(ZipShort.getBytes(versionNeededToExtract(entry.entry.getMethod(), false, false)));

                // * remove ZIP64 extra so it doesn't get written
                //   to the central directory
                entry.entry.removeExtraField(Zip64ExtendedInformationExtraField
                                             .HEADER_ID);
                entry.entry.setExtra();

                // * reset hasUsedZip64 if it has been set because
                //   of this entry
                if (entry.causedUseOfZip64) {
                    hasUsedZip64 = false;
                }
            }
        }
        channel.position(save);
    }

    /**
     * {@inheritDoc}
     * @throws ClassCastException if entry is not an instance of ZipArchiveEntry
     * @throws Zip64RequiredException if the entry's uncompressed or
     * compressed size is known to exceed 4 GByte and {@link #setUseZip64}
     * is {@link Zip64Mode#Never}.
     */
    @Override
    public void putArchiveEntry(final ArchiveEntry archiveEntry) throws IOException {
        putArchiveEntry(archiveEntry, false);
    }

    /**
     * Writes the headers for an archive entry to the output stream.
     * The caller must then write the content to the stream and call
     * {@link #closeArchiveEntry()} to complete the process.
     *
     * @param archiveEntry The archiveEntry
     * @param phased If true size, compressedSize and crc required to be known up-front in the archiveEntry
     * @throws ClassCastException if entry is not an instance of ZipArchiveEntry
     * @throws Zip64RequiredException if the entry's uncompressed or
     * compressed size is known to exceed 4 GByte and {@link #setUseZip64}
     * is {@link Zip64Mode#Never}.
     */
    private void putArchiveEntry(final ArchiveEntry archiveEntry, final boolean phased) throws IOException {
        if (finished) {
            throw new IOException("Stream has already been finished");
        }

        if (entry != null) {
            closeArchiveEntry();
        }

        entry = new CurrentEntry((ZipArchiveEntry) archiveEntry);
        entries.add(entry.entry);

        setDefaults(entry.entry);

        final Zip64Mode effectiveMode = getEffectiveZip64Mode(entry.entry);
        validateSizeInformation(effectiveMode);

        if (shouldAddZip64Extra(entry.entry, effectiveMode)) {

            final Zip64ExtendedInformationExtraField z64 = getZip64Extra(entry.entry);

            final ZipEightByteInteger size;
            final ZipEightByteInteger compressedSize;
            if (phased) {
                // sizes are already known
                size = new ZipEightByteInteger(entry.entry.getSize());
                compressedSize = new ZipEightByteInteger(entry.entry.getCompressedSize());
            } else if (entry.entry.getMethod() == STORED
                       && entry.entry.getSize() != ArchiveEntry.SIZE_UNKNOWN) {
                // actually, we already know the sizes
                compressedSize = size = new ZipEightByteInteger(entry.entry.getSize());
            } else {
                // just a placeholder, real data will be in data
                // descriptor or inserted later via SeekableByteChannel
                compressedSize = size = ZipEightByteInteger.ZERO;
            }
            z64.setSize(size);
            z64.setCompressedSize(compressedSize);
            entry.entry.setExtra();
        }

        if (entry.entry.getMethod() == DEFLATED && hasCompressionLevelChanged) {
            def.setLevel(level);
            hasCompressionLevelChanged = false;
        }
        writeLocalFileHeader((ZipArchiveEntry) archiveEntry, phased);
    }

    /**
     * Provides default values for compression method and last
     * modification time.
     */
    private void setDefaults(final ZipArchiveEntry entry) {
        if (entry.getMethod() == -1) { // not specified
            entry.setMethod(method);
        }

        if (entry.getTime() == -1) { // not specified
            entry.setTime(System.currentTimeMillis());
        }
    }

    /**
     * Throws an exception if the size is unknown for a stored entry
     * that is written to a non-seekable output or the entry is too
     * big to be written without Zip64 extra but the mode has been set
     * to Never.
     */
    private void validateSizeInformation(final Zip64Mode effectiveMode)
        throws ZipException {
        // Size/CRC not required if SeekableByteChannel is used
        if (entry.entry.getMethod() == STORED && channel == null) {
            if (entry.entry.getSize() == ArchiveEntry.SIZE_UNKNOWN) {
                throw new ZipException("Uncompressed size is required for"
                                       + " STORED method when not writing to a"
                                       + " file");
            }
            if (entry.entry.getCrc() == ZipArchiveEntry.CRC_UNKNOWN) {
                throw new ZipException("CRC checksum is required for STORED"
                                       + " method when not writing to a file");
            }
            entry.entry.setCompressedSize(entry.entry.getSize());
        }

        if ((entry.entry.getSize() >= ZIP64_MAGIC
             || entry.entry.getCompressedSize() >= ZIP64_MAGIC)
            && effectiveMode == Zip64Mode.Never) {
            throw new Zip64RequiredException(Zip64RequiredException
                                             .getEntryTooBigMessage(entry.entry));
        }
    }

    /**
     * Whether to add a Zip64 extended information extra field to the
     * local file header.
     *
     * <p>Returns true if</p>
     *
     * <ul>
     *  <li>mode is Always</li>
     *  <li>or we already know it is going to be needed</li>
     *  <li>or the size is unknown and we can ensure it won't hurt
     *      other implementations if we add it (i.e. we can erase its
     *      usage)</li>
     * </ul>
     */
    private boolean shouldAddZip64Extra(final ZipArchiveEntry entry, final Zip64Mode mode) {
        return mode == Zip64Mode.Always
            || mode == Zip64Mode.AlwaysWithCompatibility
            || entry.getSize() >= ZIP64_MAGIC
            || entry.getCompressedSize() >= ZIP64_MAGIC
            || (entry.getSize() == ArchiveEntry.SIZE_UNKNOWN
                && channel != null && mode != Zip64Mode.Never);
    }

    /**
     * Set the file comment.
     * @param comment the comment
     */
    public void setComment(final String comment) {
        this.comment = comment;
    }

    /**
     * Sets the compression level for subsequent entries.
     *
     * <p>Default is Deflater.DEFAULT_COMPRESSION.</p>
     * @param level the compression level.
     * @throws IllegalArgumentException if an invalid compression
     * level is specified.
     */
    public void setLevel(final int level) {
        if (level < Deflater.DEFAULT_COMPRESSION
            || level > Deflater.BEST_COMPRESSION) {
            throw new IllegalArgumentException("Invalid compression level: "
                                               + level);
        }
        if (this.level == level) {
            return;
        }
        hasCompressionLevelChanged = true;
        this.level = level;
    }

    /**
     * Sets the default compression method for subsequent entries.
     *
     * <p>Default is DEFLATED.</p>
     * @param method an {@code int} from java.util.zip.ZipEntry
     */
    public void setMethod(final int method) {
        this.method = method;
    }

    /**
     * Whether this stream is able to write the given entry.
     *
     * <p>May return false if it is set up to use encryption or a
     * compression method that hasn't been implemented yet.</p>
     * @since 1.1
     */
    @Override
    public boolean canWriteEntryData(final ArchiveEntry ae) {
        if (ae instanceof ZipArchiveEntry) {
            final ZipArchiveEntry zae = (ZipArchiveEntry) ae;
            return zae.getMethod() != ZipMethod.IMPLODING.getCode()
                && zae.getMethod() != ZipMethod.UNSHRINKING.getCode()
                && ZipUtil.canHandleEntryData(zae);
        }
        return false;
    }

    /**
     * Write preamble data. Most of the time, this is used to
     * make self-extracting zips.
     *
     * @param preamble data to write
     * @throws IOException if an entry already exists
     * @since 1.21
     */
    public void writePreamble(final byte[] preamble) throws IOException {
        writePreamble(preamble, 0, preamble.length);
    }

    /**
     * Write preamble data. Most of the time, this is used to
     * make self-extracting zips.
     *
     * @param preamble data to write
     * @param offset the start offset in the data
     * @param length the number of bytes to write
     * @throws IOException if an entry already exists
     * @since 1.21
     */
    public void writePreamble(final byte[] preamble, final int offset, final int length) throws IOException {
        if (entry != null) {
            throw new IllegalStateException("Preamble must be written before creating an entry");
        }
        this.streamCompressor.writeCounted(preamble, offset, length);
    }
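
    // Illustrative sketch only ("stubPath" is a hypothetical path supplied by
    // the caller): a self-extracting archive can be produced by writing the
    // extractor stub before the first entry, e.g.
    //
    //     zipOut.writePreamble(Files.readAllBytes(stubPath));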

    /**
     * Writes bytes to ZIP entry.
     * @param b the byte array to write
     * @param offset the start position to write from
     * @param length the number of bytes to write
     * @throws IOException on error
     */
    @Override
    public void write(final byte[] b, final int offset, final int length) throws IOException {
        if (entry == null) {
            throw new IllegalStateException("No current entry");
        }
        ZipUtil.checkRequestedFeatures(entry.entry);
        final long writtenThisTime = streamCompressor.write(b, offset, length, entry.entry.getMethod());
        count(writtenThisTime);
    }

    /**
     * Write bytes to output or random access file.
     * @param data the byte array to write
     * @throws IOException on error
     */
    private void writeCounted(final byte[] data) throws IOException {
        streamCompressor.writeCounted(data);
    }

    private void copyFromZipInputStream(final InputStream src) throws IOException {
        if (entry == null) {
            throw new IllegalStateException("No current entry");
        }
        ZipUtil.checkRequestedFeatures(entry.entry);
        entry.hasWritten = true;
        int length;
        while ((length = src.read(copyBuffer)) >= 0) {
            streamCompressor.writeCounted(copyBuffer, 0, length);
            count(length);
        }
    }

    /**
     * Closes this output stream and releases any system resources
     * associated with the stream.
     *
     * @throws IOException if an I/O error occurs.
     * @throws Zip64RequiredException if the archive's size exceeds 4
     * GByte or there are more than 65535 entries inside the archive
     * and {@link #setUseZip64} is {@link Zip64Mode#Never}.
     */
    @Override
    public void close() throws IOException {
        try {
            if (!finished) {
                finish();
            }
        } finally {
            destroy();
        }
    }

    /**
     * Flushes this output stream and forces any buffered output bytes
     * to be written out to the stream.
     *
     * @throws IOException if an I/O error occurs.
     */
    @Override
    public void flush() throws IOException {
        if (outputStream != null) {
            outputStream.flush();
        }
    }

    /*
     * Various ZIP constants shared between this class, ZipArchiveInputStream and ZipFile
     */
    /**
     * local file header signature
     */
    static final byte[] LFH_SIG = ZipLong.LFH_SIG.getBytes(); //NOSONAR
    /**
     * data descriptor signature
     */
    static final byte[] DD_SIG = ZipLong.DD_SIG.getBytes(); //NOSONAR
    /**
     * central file header signature
     */
    static final byte[] CFH_SIG = ZipLong.CFH_SIG.getBytes(); //NOSONAR
    /**
     * end of central dir signature
     */
    static final byte[] EOCD_SIG = ZipLong.getBytes(0X06054B50L); //NOSONAR
    /**
     * ZIP64 end of central dir signature
     */
    static final byte[] ZIP64_EOCD_SIG = ZipLong.getBytes(0X06064B50L); //NOSONAR
    /**
     * ZIP64 end of central dir locator signature
     */
    static final byte[] ZIP64_EOCD_LOC_SIG = ZipLong.getBytes(0X07064B50L); //NOSONAR

    /**
     * Writes next block of compressed data to the output stream.
     * @throws IOException on error
     */
    protected final void deflate() throws IOException {
        streamCompressor.deflate();
    }

    /**
     * Writes the local file header entry.
     * @param ze the entry to write
     * @throws IOException on error
     */
    protected void writeLocalFileHeader(final ZipArchiveEntry ze) throws IOException {
        writeLocalFileHeader(ze, false);
    }

    private void writeLocalFileHeader(final ZipArchiveEntry ze, final boolean phased) throws IOException {
        final boolean encodable = zipEncoding.canEncode(ze.getName());
        final ByteBuffer name = getName(ze);

        if (createUnicodeExtraFields != UnicodeExtraFieldPolicy.NEVER) {
            addUnicodeExtraFields(ze, encodable, name);
        }

        long localHeaderStart = streamCompressor.getTotalBytesWritten();
        if (isSplitZip) {
            // when creating a split zip, the offset should be
            // the offset to the corresponding segment disk
            final ZipSplitOutputStream splitOutputStream = (ZipSplitOutputStream) this.outputStream;
            ze.setDiskNumberStart(splitOutputStream.getCurrentSplitSegmentIndex());
            localHeaderStart = splitOutputStream.getCurrentSplitSegmentBytesWritten();
        }

        final byte[] localHeader = createLocalFileHeader(ze, name, encodable, phased, localHeaderStart);
        metaData.put(ze, new EntryMetaData(localHeaderStart, usesDataDescriptor(ze.getMethod(), phased)));
        entry.localDataStart = localHeaderStart + LFH_CRC_OFFSET; // At crc offset
        writeCounted(localHeader);
        entry.dataStart = streamCompressor.getTotalBytesWritten();
    }


    private byte[] createLocalFileHeader(final ZipArchiveEntry ze, final ByteBuffer name, final boolean encodable,
                                         final boolean phased, final long archiveOffset) {
        final ZipExtraField oldEx = ze.getExtraField(ResourceAlignmentExtraField.ID);
        if (oldEx != null) {
            ze.removeExtraField(ResourceAlignmentExtraField.ID);
        }
        final ResourceAlignmentExtraField oldAlignmentEx =
            oldEx instanceof ResourceAlignmentExtraField ?
                (ResourceAlignmentExtraField) oldEx : null;

        int alignment = ze.getAlignment();
        if (alignment <= 0 && oldAlignmentEx != null) {
            alignment = oldAlignmentEx.getAlignment();
        }

        if (alignment > 1 || (oldAlignmentEx != null && !oldAlignmentEx.allowMethodChange())) {
            final int oldLength = LFH_FILENAME_OFFSET +
                name.limit() - name.position() +
                ze.getLocalFileDataExtra().length;

            final int padding = (int) ((-archiveOffset - oldLength - ZipExtraField.EXTRAFIELD_HEADER_SIZE
                                        - ResourceAlignmentExtraField.BASE_SIZE) &
                                       (alignment - 1));
            ze.addExtraField(new ResourceAlignmentExtraField(alignment,
                oldAlignmentEx != null && oldAlignmentEx.allowMethodChange(), padding));
        }

        final byte[] extra = ze.getLocalFileDataExtra();
        final int nameLen = name.limit() - name.position();
        final int len = LFH_FILENAME_OFFSET + nameLen + extra.length;
        final byte[] buf = new byte[len];

        System.arraycopy(LFH_SIG, 0, buf, LFH_SIG_OFFSET, WORD);

        // store method in local variable to prevent multiple method calls
        final int zipMethod = ze.getMethod();
        final boolean dataDescriptor = usesDataDescriptor(zipMethod, phased);

        putShort(versionNeededToExtract(zipMethod, hasZip64Extra(ze), dataDescriptor), buf, LFH_VERSION_NEEDED_OFFSET);

        final GeneralPurposeBit generalPurposeBit = getGeneralPurposeBits(!encodable && fallbackToUTF8, dataDescriptor);
        generalPurposeBit.encode(buf, LFH_GPB_OFFSET);

        // compression method
        putShort(zipMethod, buf, LFH_METHOD_OFFSET);

        ZipUtil.toDosTime(calendarInstance, ze.getTime(), buf, LFH_TIME_OFFSET);

        // CRC
        if (phased || !(zipMethod == DEFLATED || channel != null)) {
            putLong(ze.getCrc(), buf, LFH_CRC_OFFSET);
        } else {
            System.arraycopy(LZERO, 0, buf, LFH_CRC_OFFSET, WORD);
        }

        // compressed length
        // uncompressed length
        if (hasZip64Extra(entry.entry)) {
            // point to ZIP64 extended information extra field for
            // sizes, may get rewritten once sizes are known if
            // stream is seekable
            ZipLong.ZIP64_MAGIC.putLong(buf, LFH_COMPRESSED_SIZE_OFFSET);
            ZipLong.ZIP64_MAGIC.putLong(buf, LFH_ORIGINAL_SIZE_OFFSET);
        } else if (phased) {
            putLong(ze.getCompressedSize(), buf, LFH_COMPRESSED_SIZE_OFFSET);
            putLong(ze.getSize(), buf, LFH_ORIGINAL_SIZE_OFFSET);
        } else if (zipMethod == DEFLATED || channel != null) {
            System.arraycopy(LZERO, 0, buf, LFH_COMPRESSED_SIZE_OFFSET, WORD);
            System.arraycopy(LZERO, 0, buf, LFH_ORIGINAL_SIZE_OFFSET, WORD);
        } else { // Stored
            putLong(ze.getSize(), buf, LFH_COMPRESSED_SIZE_OFFSET);
            putLong(ze.getSize(), buf, LFH_ORIGINAL_SIZE_OFFSET);
        }
        // file name length
        putShort(nameLen, buf, LFH_FILENAME_LENGTH_OFFSET);

        // extra field length
        putShort(extra.length, buf, LFH_EXTRA_LENGTH_OFFSET);

        // file name
        System.arraycopy(name.array(), name.arrayOffset(), buf, LFH_FILENAME_OFFSET, nameLen);

        // extra fields
        System.arraycopy(extra, 0, buf, LFH_FILENAME_OFFSET + nameLen, extra.length);

        return buf;
    }


    /**
     * Adds UnicodeExtra fields for name and file comment if mode is
     * ALWAYS or the data cannot be encoded using the configured
     * encoding.
     */
    private void addUnicodeExtraFields(final ZipArchiveEntry ze, final boolean encodable,
                                       final ByteBuffer name)
        throws IOException {
        if (createUnicodeExtraFields == UnicodeExtraFieldPolicy.ALWAYS
            || !encodable) {
            ze.addExtraField(new UnicodePathExtraField(ze.getName(),
                                                       name.array(),
                                                       name.arrayOffset(),
                                                       name.limit()
                                                       - name.position()));
        }

        final String comm = ze.getComment();
        if (comm != null && !comm.isEmpty()) {

            final boolean commentEncodable = zipEncoding.canEncode(comm);

            if (createUnicodeExtraFields == UnicodeExtraFieldPolicy.ALWAYS
                || !commentEncodable) {
                final ByteBuffer commentB = getEntryEncoding(ze).encode(comm);
                ze.addExtraField(new UnicodeCommentExtraField(comm,
                                                              commentB.array(),
                                                              commentB.arrayOffset(),
                                                              commentB.limit()
                                                              - commentB.position())
                );
            }
        }
    }

    /**
     * Writes the data descriptor entry.
     * @param ze the entry to write
     * @throws IOException on error
     */
    protected void writeDataDescriptor(final ZipArchiveEntry ze) throws IOException {
        if (!usesDataDescriptor(ze.getMethod(), false)) {
            return;
        }
        writeCounted(DD_SIG);
        writeCounted(ZipLong.getBytes(ze.getCrc()));
        if (!hasZip64Extra(ze)) {
            writeCounted(ZipLong.getBytes(ze.getCompressedSize()));
            writeCounted(ZipLong.getBytes(ze.getSize()));
        } else {
            writeCounted(ZipEightByteInteger.getBytes(ze.getCompressedSize()));
            writeCounted(ZipEightByteInteger.getBytes(ze.getSize()));
        }
    }

    /**
     * Writes the central file header entry.
     * @param ze the entry to write
     * @throws IOException on error
     * @throws Zip64RequiredException if the archive's size exceeds 4
     * GByte and {@link #setUseZip64(Zip64Mode)} is {@link
     * Zip64Mode#Never}.
     */
    protected void writeCentralFileHeader(final ZipArchiveEntry ze) throws IOException {
        final byte[] centralFileHeader = createCentralFileHeader(ze);
        writeCounted(centralFileHeader);
    }

    private byte[] createCentralFileHeader(final ZipArchiveEntry ze) throws IOException {

        final EntryMetaData entryMetaData = metaData.get(ze);
        final boolean needsZip64Extra = hasZip64Extra(ze)
            || ze.getCompressedSize() >= ZIP64_MAGIC
            || ze.getSize() >= ZIP64_MAGIC
            || entryMetaData.offset >= ZIP64_MAGIC
            || ze.getDiskNumberStart() >= ZIP64_MAGIC_SHORT
            || zip64Mode == Zip64Mode.Always
            || zip64Mode == Zip64Mode.AlwaysWithCompatibility;

        if (needsZip64Extra && zip64Mode == Zip64Mode.Never) {
            // must be the offset that is too big, otherwise an
            // exception would have been thrown in putArchiveEntry or
            // closeArchiveEntry
            throw new Zip64RequiredException(Zip64RequiredException
                                             .ARCHIVE_TOO_BIG_MESSAGE);
        }


        handleZip64Extra(ze, entryMetaData.offset, needsZip64Extra);

        return createCentralFileHeader(ze, getName(ze), entryMetaData, needsZip64Extra);
    }

    /**
     * Writes the central file header entry.
     * @param ze the entry to write
     * @param name The encoded name
     * @param entryMetaData meta data for this file
     * @throws IOException on error
     */
    private byte[] createCentralFileHeader(final ZipArchiveEntry ze, final ByteBuffer name,
                                           final EntryMetaData entryMetaData,
                                           final boolean needsZip64Extra) throws IOException {
        if (isSplitZip) {
            // calculate the disk number for every central file header,
            // this will be used in writing End Of Central Directory and Zip64 End Of Central Directory
            final int currentSplitSegment = ((ZipSplitOutputStream) this.outputStream).getCurrentSplitSegmentIndex();
            if (numberOfCDInDiskData.get(currentSplitSegment) == null) {
                numberOfCDInDiskData.put(currentSplitSegment, 1);
            } else {
                final int originalNumberOfCD = numberOfCDInDiskData.get(currentSplitSegment);
                numberOfCDInDiskData.put(currentSplitSegment, originalNumberOfCD + 1);
            }
        }

        final byte[] extra = ze.getCentralDirectoryExtra();
        final int extraLength = extra.length;

        // file comment length
        String comm = ze.getComment();
        if (comm == null) {
            comm = "";
        }

        final ByteBuffer commentB = getEntryEncoding(ze).encode(comm);
        final int nameLen = name.limit() - name.position();
        final int commentLen = commentB.limit() - commentB.position();
        final int len = CFH_FILENAME_OFFSET + nameLen + extraLength + commentLen;
        final byte[] buf = new byte[len];

        System.arraycopy(CFH_SIG, 0, buf, CFH_SIG_OFFSET, WORD);

        // version made by
        // CheckStyle:MagicNumber OFF
        putShort((ze.getPlatform() << 8) | (!hasUsedZip64 ? DATA_DESCRIPTOR_MIN_VERSION : ZIP64_MIN_VERSION),
                 buf, CFH_VERSION_MADE_BY_OFFSET);

        final int zipMethod = ze.getMethod();
        final boolean encodable = zipEncoding.canEncode(ze.getName());
        putShort(versionNeededToExtract(zipMethod, needsZip64Extra, entryMetaData.usesDataDescriptor),
            buf, CFH_VERSION_NEEDED_OFFSET);
        getGeneralPurposeBits(!encodable && fallbackToUTF8, entryMetaData.usesDataDescriptor).encode(buf, CFH_GPB_OFFSET);

        // compression method
        putShort(zipMethod, buf, CFH_METHOD_OFFSET);


        // last mod. time and date
        ZipUtil.toDosTime(calendarInstance, ze.getTime(), buf, CFH_TIME_OFFSET);

        // CRC
        // compressed length
        // uncompressed length
        putLong(ze.getCrc(), buf, CFH_CRC_OFFSET);
        if (ze.getCompressedSize() >= ZIP64_MAGIC
            || ze.getSize() >= ZIP64_MAGIC
            || zip64Mode == Zip64Mode.Always
            || zip64Mode == Zip64Mode.AlwaysWithCompatibility) {
            ZipLong.ZIP64_MAGIC.putLong(buf, CFH_COMPRESSED_SIZE_OFFSET);
            ZipLong.ZIP64_MAGIC.putLong(buf, CFH_ORIGINAL_SIZE_OFFSET);
        } else {
            putLong(ze.getCompressedSize(), buf, CFH_COMPRESSED_SIZE_OFFSET);
            putLong(ze.getSize(), buf, CFH_ORIGINAL_SIZE_OFFSET);
        }

        putShort(nameLen, buf, CFH_FILENAME_LENGTH_OFFSET);

        // extra field length
        putShort(extraLength, buf, CFH_EXTRA_LENGTH_OFFSET);

        putShort(commentLen, buf, CFH_COMMENT_LENGTH_OFFSET);

        // disk number start
        if (isSplitZip) {
            if (ze.getDiskNumberStart() >= ZIP64_MAGIC_SHORT || zip64Mode == Zip64Mode.Always) {
                putShort(ZIP64_MAGIC_SHORT, buf, CFH_DISK_NUMBER_OFFSET);
            } else {
                putShort((int) ze.getDiskNumberStart(), buf, CFH_DISK_NUMBER_OFFSET);
            }
        } else {
            System.arraycopy(ZERO, 0, buf, CFH_DISK_NUMBER_OFFSET, SHORT);
        }

        // internal file attributes
        putShort(ze.getInternalAttributes(), buf, CFH_INTERNAL_ATTRIBUTES_OFFSET);

        // external file attributes
        putLong(ze.getExternalAttributes(), buf, CFH_EXTERNAL_ATTRIBUTES_OFFSET);

        // relative offset of LFH
        if (entryMetaData.offset >= ZIP64_MAGIC || zip64Mode == Zip64Mode.Always) {
            putLong(ZIP64_MAGIC, buf, CFH_LFH_OFFSET);
        } else {
            putLong(Math.min(entryMetaData.offset, ZIP64_MAGIC), buf, CFH_LFH_OFFSET);
        }

        // file name
        System.arraycopy(name.array(), name.arrayOffset(), buf, CFH_FILENAME_OFFSET, nameLen);

        final int extraStart = CFH_FILENAME_OFFSET + nameLen;
        System.arraycopy(extra, 0, buf, extraStart, extraLength);

        final int commentStart = extraStart + extraLength;

        // file comment
        System.arraycopy(commentB.array(), commentB.arrayOffset(), buf, commentStart, commentLen);
        return buf;
    }

    /**
     * If the entry needs Zip64 extra information inside the central
     * directory then configure its data.
     */
    private void handleZip64Extra(final ZipArchiveEntry ze, final long lfhOffset,
                                  final boolean needsZip64Extra) {
        if (needsZip64Extra) {
            final Zip64ExtendedInformationExtraField z64 = getZip64Extra(ze);
            if (ze.getCompressedSize() >= ZIP64_MAGIC
                || ze.getSize() >= ZIP64_MAGIC
                || zip64Mode == Zip64Mode.Always
                || zip64Mode == Zip64Mode.AlwaysWithCompatibility) {
                z64.setCompressedSize(new ZipEightByteInteger(ze.getCompressedSize()));
                z64.setSize(new ZipEightByteInteger(ze.getSize()));
            } else {
                // reset value that may have been set for LFH
                z64.setCompressedSize(null);
                z64.setSize(null);
            }

            final boolean needsToEncodeLfhOffset =
                lfhOffset >= ZIP64_MAGIC || zip64Mode == Zip64Mode.Always;
            final boolean needsToEncodeDiskNumberStart =
                ze.getDiskNumberStart() >= ZIP64_MAGIC_SHORT || zip64Mode == Zip64Mode.Always;

            if (needsToEncodeLfhOffset || needsToEncodeDiskNumberStart) {
                z64.setRelativeHeaderOffset(new ZipEightByteInteger(lfhOffset));
            }
            if (needsToEncodeDiskNumberStart) {
                z64.setDiskStartNumber(new ZipLong(ze.getDiskNumberStart()));
            }
            ze.setExtra();
        }
    }

    /**
     * Writes the "End of central dir record".
     * @throws IOException on error
     * @throws Zip64RequiredException if the archive's size exceeds 4
     * GByte or there are more than 65535 entries inside the archive
     * and {@link #setUseZip64(Zip64Mode)} is {@link Zip64Mode#Never}.
     */
    protected void writeCentralDirectoryEnd() throws IOException {
        if (!hasUsedZip64 && isSplitZip) {
            ((ZipSplitOutputStream) this.outputStream).prepareToWriteUnsplittableContent(eocdLength);
        }

        validateIfZip64IsNeededInEOCD();

        writeCounted(EOCD_SIG);

        // number of this disk
        int numberOfThisDisk = 0;
        if (isSplitZip) {
            numberOfThisDisk = ((ZipSplitOutputStream) this.outputStream).getCurrentSplitSegmentIndex();
        }
        writeCounted(ZipShort.getBytes(numberOfThisDisk));

        // disk number of the start of central directory
        writeCounted(ZipShort.getBytes((int) cdDiskNumberStart));

        // number of entries
        final int numberOfEntries = entries.size();

        // total number of entries in the central directory on this disk
        final int numOfEntriesOnThisDisk = isSplitZip
            ? (numberOfCDInDiskData.get(numberOfThisDisk) == null ? 0 : numberOfCDInDiskData.get(numberOfThisDisk))
            : numberOfEntries;
        final byte[] numOfEntriesOnThisDiskData = ZipShort
            .getBytes(Math.min(numOfEntriesOnThisDisk, ZIP64_MAGIC_SHORT));
        writeCounted(numOfEntriesOnThisDiskData);

        // number of entries
        final byte[] num = ZipShort.getBytes(Math.min(numberOfEntries,
                                                      ZIP64_MAGIC_SHORT));
        writeCounted(num);

        // length and location of CD
        writeCounted(ZipLong.getBytes(Math.min(cdLength, ZIP64_MAGIC)));
        writeCounted(ZipLong.getBytes(Math.min(cdOffset, ZIP64_MAGIC)));

        // ZIP file comment
        final ByteBuffer data = this.zipEncoding.encode(comment);
        final int dataLen = data.limit() - data.position();
        writeCounted(ZipShort.getBytes(dataLen));
        streamCompressor.writeCounted(data.array(), data.arrayOffset(), dataLen);
    }

    /**
     * If the Zip64 mode is set to Never, none of the fields in the End Of
     * Central Directory record may exceed their limits.
     * @throws Zip64RequiredException if Zip64 is actually needed
     */
    private void validateIfZip64IsNeededInEOCD() throws Zip64RequiredException {
        // an exception is only thrown if the Zip64 mode is Never while Zip64 is actually needed
        if (zip64Mode != Zip64Mode.Never) {
            return;
        }

        int numberOfThisDisk = 0;
        if (isSplitZip) {
            numberOfThisDisk = ((ZipSplitOutputStream) this.outputStream).getCurrentSplitSegmentIndex();
        }
        if (numberOfThisDisk >= ZIP64_MAGIC_SHORT) {
            throw new Zip64RequiredException(Zip64RequiredException
                .NUMBER_OF_THIS_DISK_TOO_BIG_MESSAGE);
        }

        if (cdDiskNumberStart >= ZIP64_MAGIC_SHORT) {
            throw new Zip64RequiredException(Zip64RequiredException
                .NUMBER_OF_THE_DISK_OF_CENTRAL_DIRECTORY_TOO_BIG_MESSAGE);
        }

        final int numOfEntriesOnThisDisk = numberOfCDInDiskData.get(numberOfThisDisk) == null
            ? 0 : numberOfCDInDiskData.get(numberOfThisDisk);
        if (numOfEntriesOnThisDisk >= ZIP64_MAGIC_SHORT) {
            throw new Zip64RequiredException(Zip64RequiredException
                .TOO_MANY_ENTRIES_ON_THIS_DISK_MESSAGE);
        }

        // number of entries
        if (entries.size() >= ZIP64_MAGIC_SHORT) {
            throw new Zip64RequiredException(Zip64RequiredException
                .TOO_MANY_ENTRIES_MESSAGE);
        }

        if (cdLength >= ZIP64_MAGIC) {
            throw new Zip64RequiredException(Zip64RequiredException
                .SIZE_OF_CENTRAL_DIRECTORY_TOO_BIG_MESSAGE);
        }

        if (cdOffset >= ZIP64_MAGIC) {
            throw new Zip64RequiredException(Zip64RequiredException
                .ARCHIVE_TOO_BIG_MESSAGE);
        }
    }

    /**
     * Writes the "ZIP64 End of central dir record" and
     * "ZIP64 End of central dir locator".
     * @throws IOException on error
     * @since 1.3
     */
    protected void writeZip64CentralDirectory() throws IOException {
        if (zip64Mode == Zip64Mode.Never) {
            return;
        }

        if (!hasUsedZip64 && shouldUseZip64EOCD()) {
            // actually "will use"
            hasUsedZip64 = true;
        }

        if (!hasUsedZip64) {
            return;
        }

        long offset = streamCompressor.getTotalBytesWritten();
        long diskNumberStart = 0L;
        if (isSplitZip) {
            // when creating a split zip, the offset should be the offset
            // within the corresponding segment disk
            final ZipSplitOutputStream zipSplitOutputStream = (ZipSplitOutputStream) this.outputStream;
            offset = zipSplitOutputStream.getCurrentSplitSegmentBytesWritten();
            diskNumberStart = zipSplitOutputStream.getCurrentSplitSegmentIndex();
        }

        writeOut(ZIP64_EOCD_SIG);
        // size of the zip64 end of central directory record; there is no
        // variable-length part as we don't support the extensible data sector, yet
        writeOut(ZipEightByteInteger
            .getBytes(SHORT /* version made by */
                + SHORT /* version needed to extract */
                + WORD /* disk number */
                + WORD /* disk with central directory */
                + DWORD /* number of entries in CD on this disk */
                + DWORD /* total number of entries */
                + DWORD /* size of CD */
                + (long) DWORD /* offset of CD */
            ));

        // version made by and version needed to extract
        writeOut(ZipShort.getBytes(ZIP64_MIN_VERSION));
        writeOut(ZipShort.getBytes(ZIP64_MIN_VERSION));

        // number of this disk
        int numberOfThisDisk = 0;
        if (isSplitZip) {
            numberOfThisDisk = ((ZipSplitOutputStream) this.outputStream).getCurrentSplitSegmentIndex();
        }
        writeOut(ZipLong.getBytes(numberOfThisDisk));
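
        // note: unlike the classic "End of central directory record", the ZIP64
        // record stores the disk numbers as four-byte values and the entry counts
        // as well as the size and offset of the central directory as eight-byte
        // values, hence the mix of ZipLong and ZipEightByteInteger below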

        // disk number of the start of the central directory
        writeOut(ZipLong.getBytes(cdDiskNumberStart));

        // total number of entries in the central directory on this disk
        final int numOfEntriesOnThisDisk = isSplitZip
            ? (numberOfCDInDiskData.get(numberOfThisDisk) == null ? 0 : numberOfCDInDiskData.get(numberOfThisDisk))
            : entries.size();
        final byte[] numOfEntriesOnThisDiskData = ZipEightByteInteger.getBytes(numOfEntriesOnThisDisk);
        writeOut(numOfEntriesOnThisDiskData);

        // number of entries
        final byte[] num = ZipEightByteInteger.getBytes(entries.size());
        writeOut(num);

        // length and location of CD
        writeOut(ZipEightByteInteger.getBytes(cdLength));
        writeOut(ZipEightByteInteger.getBytes(cdOffset));

        // no "zip64 extensible data sector" for now

        if (isSplitZip) {
            // based on the zip specification, the "End of central directory record" and
            // the "ZIP64 end of central directory locator" record must be on the same segment
            final int zip64EOCDLOCLength = WORD /* length of ZIP64_EOCD_LOC_SIG */
                + WORD /* disk number of ZIP64_EOCD_SIG */
                + DWORD /* offset of ZIP64_EOCD_SIG */
                + WORD /* total number of disks */;

            final long unsplittableContentSize = zip64EOCDLOCLength + eocdLength;
            ((ZipSplitOutputStream) this.outputStream).prepareToWriteUnsplittableContent(unsplittableContentSize);
        }

        // and now the "ZIP64 end of central directory locator"
        writeOut(ZIP64_EOCD_LOC_SIG);

        // disk number holding the ZIP64 EOCD record
        writeOut(ZipLong.getBytes(diskNumberStart));
        // relative offset of the ZIP64 EOCD record
        writeOut(ZipEightByteInteger.getBytes(offset));
        // total number of disks
        if (isSplitZip) {
            // the "ZIP64 end of central directory locator" and the "End of central
            // directory record" must be on the same split segment, which means they
            // are located on the last disk
            final int totalNumberOfDisks = ((ZipSplitOutputStream) this.outputStream).getCurrentSplitSegmentIndex() + 1;
            writeOut(ZipLong.getBytes(totalNumberOfDisks));
        } else {
            writeOut(ONE);
        }
    }

    /**
     * 4.4.1.4 If one of the fields in the end of central directory
     * record is too small to hold required data, the field SHOULD be
     * set to -1 (0xFFFF or 0xFFFFFFFF) and the ZIP64 format record
     * SHOULD be created.
     * @return true if the zip64 End Of Central Directory record is needed
     */
    private boolean shouldUseZip64EOCD() {
        int numberOfThisDisk = 0;
        if (isSplitZip) {
            numberOfThisDisk = ((ZipSplitOutputStream) this.outputStream).getCurrentSplitSegmentIndex();
        }
        final int numOfEntriesOnThisDisk = numberOfCDInDiskData.get(numberOfThisDisk) == null
            ? 0 : numberOfCDInDiskData.get(numberOfThisDisk);
        return numberOfThisDisk >= ZIP64_MAGIC_SHORT /* number of this disk */
            || cdDiskNumberStart >= ZIP64_MAGIC_SHORT /* number of the disk with the start of the central directory */
            || numOfEntriesOnThisDisk >= ZIP64_MAGIC_SHORT /* total number of entries in the central directory on this disk */
            || entries.size() >= ZIP64_MAGIC_SHORT /* total number of entries in the central directory */
            || cdLength >= ZIP64_MAGIC /* size of the central directory */
            || cdOffset >= ZIP64_MAGIC; /* offset of the start of the central directory with respect to the starting disk number */
    }

    /**
     * Write bytes to output or random access file.
     * @param data the byte array to write
     * @throws IOException on error
     */
    protected final void writeOut(final byte[] data) throws IOException {
        streamCompressor.writeOut(data, 0, data.length);
    }

    /**
     * Write bytes to output or random access file.
     * @param data the byte array to write
     * @param offset the start position to write from
     * @param length the number of bytes to write
     * @throws IOException on error
     */
    protected final void writeOut(final byte[] data, final int offset, final int length)
        throws IOException {
        streamCompressor.writeOut(data, offset, length);
    }

    private GeneralPurposeBit getGeneralPurposeBits(final boolean utfFallback, final boolean usesDataDescriptor) {
        final GeneralPurposeBit b = new GeneralPurposeBit();
        b.useUTF8ForNames(useUTF8Flag || utfFallback);
        if (usesDataDescriptor) {
            b.useDataDescriptor(true);
        }
        return b;
    }

    private int versionNeededToExtract(final int zipMethod, final boolean zip64, final boolean usedDataDescriptor) {
        if (zip64) {
            return ZIP64_MIN_VERSION;
        }
        if (usedDataDescriptor) {
            return DATA_DESCRIPTOR_MIN_VERSION;
        }
        return versionNeededToExtractMethod(zipMethod);
    }

    private boolean usesDataDescriptor(final int zipMethod, final boolean phased) {
        return !phased && zipMethod == DEFLATED && channel == null;
    }

    private int versionNeededToExtractMethod(final int zipMethod) {
        return zipMethod == DEFLATED ? DEFLATE_MIN_VERSION : INITIAL_VERSION;
    }

    /**
     * Creates a new zip entry taking some information from the given
     * file and using the provided name.
     *
     * <p>The name will be adjusted to end with a forward slash "/" if
     * the file is a directory. If the file is not a directory a
     * potential trailing forward slash will be stripped from the
     * entry name.</p>
     *
     * <p>Must not be used if the stream has already been closed.</p>
     */
    @Override
    public ArchiveEntry createArchiveEntry(final File inputFile, final String entryName)
        throws IOException {
        if (finished) {
            throw new IOException("Stream has already been finished");
        }
        return new ZipArchiveEntry(inputFile, entryName);
    }

    /**
     * Creates a new zip entry taking some information from the given
     * file and using the provided name.
     *
     * <p>The name will be adjusted to end with a forward slash "/" if
     * the file is a directory. If the file is not a directory a
     * potential trailing forward slash will be stripped from the
     * entry name.</p>
     *
     * <p>Must not be used if the stream has already been closed.</p>
     * @param inputPath path to create the entry from.
     * @param entryName name of the entry.
     * @param options options indicating how symbolic links are handled.
     * @return a new instance.
     * @throws IOException if an I/O error occurs.
     * @since 1.21
     */
    @Override
    public ArchiveEntry createArchiveEntry(final Path inputPath, final String entryName, final LinkOption... options)
        throws IOException {
        if (finished) {
            throw new IOException("Stream has already been finished");
        }
        return new ZipArchiveEntry(inputPath, entryName);
    }

    /**
     * Get the existing ZIP64 extended information extra field or
     * create a new one and add it to the entry.
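     *
     * <p>The returned field is (re-)added as the first extra field of the
     * entry via {@link ZipArchiveEntry#addAsFirstExtraField}.</p>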
     *
     * @since 1.3
     */
    private Zip64ExtendedInformationExtraField getZip64Extra(final ZipArchiveEntry ze) {
        if (entry != null) {
            entry.causedUseOfZip64 = !hasUsedZip64;
        }
        hasUsedZip64 = true;
        final ZipExtraField extra = ze.getExtraField(Zip64ExtendedInformationExtraField.HEADER_ID);
        Zip64ExtendedInformationExtraField z64 = extra instanceof Zip64ExtendedInformationExtraField
            ? (Zip64ExtendedInformationExtraField) extra : null;
        if (z64 == null) {
            /*
              System.err.println("Adding z64 for " + ze.getName()
                  + ", method: " + ze.getMethod()
                  + " (" + (ze.getMethod() == STORED) + ")"
                  + ", channel: " + (channel != null));
            */
            z64 = new Zip64ExtendedInformationExtraField();
        }

        // even if the field is there already, make sure it is the first one
        ze.addAsFirstExtraField(z64);

        return z64;
    }

    /**
     * Is there a ZIP64 extended information extra field for the
     * entry?
     *
     * @since 1.3
     */
    private boolean hasZip64Extra(final ZipArchiveEntry ze) {
        return ze.getExtraField(Zip64ExtendedInformationExtraField.HEADER_ID)
            instanceof Zip64ExtendedInformationExtraField;
    }

    /**
     * If the mode is AsNeeded and the entry is a compressed entry of
     * unknown size that gets written to a non-seekable stream then
     * change the default to Never.
     *
     * @since 1.3
     */
    private Zip64Mode getEffectiveZip64Mode(final ZipArchiveEntry ze) {
        if (zip64Mode != Zip64Mode.AsNeeded
            || channel != null
            || ze.getMethod() != DEFLATED
            || ze.getSize() != ArchiveEntry.SIZE_UNKNOWN) {
            return zip64Mode;
        }
        return Zip64Mode.Never;
    }

    private ZipEncoding getEntryEncoding(final ZipArchiveEntry ze) {
        final boolean encodable = zipEncoding.canEncode(ze.getName());
        return !encodable && fallbackToUTF8
            ? ZipEncodingHelper.UTF8_ZIP_ENCODING : zipEncoding;
    }

    private ByteBuffer getName(final ZipArchiveEntry ze) throws IOException {
        return getEntryEncoding(ze).encode(ze.getName());
    }

    /**
     * Closes the underlying stream/file without finishing the
     * archive; the result will likely be a corrupt archive.
     *
     * <p>This method only exists to support tests that generate
     * corrupt archives so they can clean up any temporary files.</p>
     */
    void destroy() throws IOException {
        try {
            if (channel != null) {
                channel.close();
            }
        } finally {
            if (outputStream != null) {
                outputStream.close();
            }
        }
    }

    /**
     * Enumerates the possible policies for creating Unicode extra
     * fields.
     */
    public static final class UnicodeExtraFieldPolicy {
        /**
         * Always create Unicode extra fields.
         */
        public static final UnicodeExtraFieldPolicy ALWAYS = new UnicodeExtraFieldPolicy("always");
        /**
         * Never create Unicode extra fields.
         */
        public static final UnicodeExtraFieldPolicy NEVER = new UnicodeExtraFieldPolicy("never");
        /**
         * Create Unicode extra fields for file names that cannot be
         * encoded using the specified encoding.
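         *
         * <p>Typically selected via {@code setCreateUnicodeExtraFields}, for
         * example (sketch, {@code zos} being an existing stream):
         * {@code zos.setCreateUnicodeExtraFields(UnicodeExtraFieldPolicy.NOT_ENCODEABLE);}</p>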
         */
        public static final UnicodeExtraFieldPolicy NOT_ENCODEABLE =
            new UnicodeExtraFieldPolicy("not encodeable");

        private final String name;
        private UnicodeExtraFieldPolicy(final String n) {
            name = n;
        }
        @Override
        public String toString() {
            return name;
        }
    }

    /**
     * Structure collecting information for the entry that is
     * currently being written.
     */
    private static final class CurrentEntry {
        private CurrentEntry(final ZipArchiveEntry entry) {
            this.entry = entry;
        }
        /**
         * Current ZIP entry.
         */
        private final ZipArchiveEntry entry;
        /**
         * Offset at which the CRC and size fields of the local file
         * header for the current entry start.
         */
        private long localDataStart;
        /**
         * Offset at which the actual data of the current entry starts,
         * right after the local file header.
         */
        private long dataStart;
        /**
         * Number of bytes read for the current entry (can't rely on
         * Deflater#getBytesRead) when using DEFLATED.
         */
        private long bytesRead;
        /**
         * Whether the current entry was the first one using ZIP64 features.
         */
        private boolean causedUseOfZip64;
        /**
         * Whether write() has been called at all.
         *
         * <p>In order to create a valid archive {@link
         * #closeArchiveEntry closeArchiveEntry} will write an empty
         * array to get the CRC right if nothing has been written to
         * the stream at all.</p>
         */
        private boolean hasWritten;
    }

    private static final class EntryMetaData {
        private final long offset;
        private final boolean usesDataDescriptor;
        private EntryMetaData(final long offset, final boolean usesDataDescriptor) {
            this.offset = offset;
            this.usesDataDescriptor = usesDataDescriptor;
        }
    }
}
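
/*
 * Illustrative usage sketch, not part of this class. The file name
 * "example.zip", the entry name and the payload are assumptions made for the
 * example only; everything else is public API of this package.
 *
 *     try (ZipArchiveOutputStream zos =
 *              new ZipArchiveOutputStream(new File("example.zip"))) {
 *         zos.setUseZip64(Zip64Mode.AsNeeded);
 *         final ZipArchiveEntry entry = new ZipArchiveEntry("hello.txt");
 *         entry.setMethod(ZipArchiveOutputStream.DEFLATED);
 *         zos.putArchiveEntry(entry);
 *         zos.write("Hello, world".getBytes(java.nio.charset.StandardCharsets.UTF_8));
 *         zos.closeArchiveEntry();
 *         zos.finish();
 *     }
 */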