/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.commons.compress.archivers.zip;

import java.util.zip.ZipException;

import static org.apache.commons.compress.archivers.zip.ZipConstants.DWORD;
import static org.apache.commons.compress.archivers.zip.ZipConstants.WORD;

/**
 * Holds size and other extended information for entries that use Zip64
 * features.
 *
 * <p>From <a href="http://www.pkware.com/documents/casestudies/APPNOTE.TXT">PKWARE's APPNOTE.TXT</a>
 * <pre>
 * Zip64 Extended Information Extra Field (0x0001):
 *
 *          The following is the layout of the zip64 extended
 *          information "extra" block. If one of the size or
 *          offset fields in the Local or Central directory
 *          record is too small to hold the required data,
 *          a Zip64 extended information record is created.
 *          The order of the fields in the zip64 extended
 *          information record is fixed, but the fields will
 *          only appear if the corresponding Local or Central
 *          directory record field is set to 0xFFFF or 0xFFFFFFFF.
 *
 *          Note: all fields stored in Intel low-byte/high-byte order.
 *
 *          Value      Size       Description
 *          -----      ----       -----------
 *  (ZIP64) 0x0001     2 bytes    Tag for this "extra" block type
 *          Size       2 bytes    Size of this "extra" block
 *          Original
 *          Size       8 bytes    Original uncompressed file size
 *          Compressed
 *          Size       8 bytes    Size of compressed data
 *          Relative Header
 *          Offset     8 bytes    Offset of local header record
 *          Disk Start
 *          Number     4 bytes    Number of the disk on which
 *                                this file starts
 *
 *          This entry in the Local header must include BOTH original
 *          and compressed file size fields. If encrypting the
 *          central directory and bit 13 of the general purpose bit
 *          flag is set indicating masking, the value stored in the
 *          Local Header for the original file size will be zero.
 * </pre></p>
 *
 * <p>Currently Commons Compress doesn't support encrypting the
 * central directory so the note about masking doesn't apply.</p>
 *
 * <p>The implementation relies on data being read from the local file
 * header and assumes that both size values are always present.</p>
 *
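 * <p>For illustration only (not part of the specification text above),
 * an entry too large for the classic 32-bit size fields might populate
 * this field roughly as follows; the concrete sizes are arbitrary and
 * {@link ZipEightByteInteger}'s long-valued constructor is assumed:</p>
 *
 * <pre>{@code
 * ZipEightByteInteger uncompressed = new ZipEightByteInteger(5L * 1024 * 1024 * 1024);
 * ZipEightByteInteger compressed = new ZipEightByteInteger(4500000000L);
 * Zip64ExtendedInformationExtraField z64 =
 *     new Zip64ExtendedInformationExtraField(uncompressed, compressed);
 * byte[] localData = z64.getLocalFileDataData();       // 16 bytes: both sizes
 * byte[] centralData = z64.getCentralDirectoryData();  // 16 bytes here as well
 * }</pre>
 *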
 * @since 1.2
 * @NotThreadSafe
 */
public class Zip64ExtendedInformationExtraField implements ZipExtraField {

    static final ZipShort HEADER_ID = new ZipShort(0x0001);

    private static final String LFH_MUST_HAVE_BOTH_SIZES_MSG =
        "Zip64 extended information must contain"
        + " both size values in the local file header.";

    private static final byte[] EMPTY = new byte[0];

    private ZipEightByteInteger size, compressedSize, relativeHeaderOffset;
    private ZipLong diskStart;

    /**
     * Stored in {@link #parseFromCentralDirectoryData
     * parseFromCentralDirectoryData} so it can be reused when ZipFile
     * calls {@link #reparseCentralDirectoryData
     * reparseCentralDirectoryData}.
     *
     * <p>Not used for anything else.</p>
     *
     * @since 1.3
     */
    private byte[] rawCentralDirectoryData;

    /**
     * This constructor should only be used by the code that reads
     * archives inside of Commons Compress.
     */
    public Zip64ExtendedInformationExtraField() { }

    /**
     * Creates an extra field based on the original and compressed size.
     *
     * @param size the entry's original size
     * @param compressedSize the entry's compressed size
     *
     * @throws IllegalArgumentException if size or compressedSize is null
     */
    public Zip64ExtendedInformationExtraField(ZipEightByteInteger size,
                                              ZipEightByteInteger compressedSize) {
        this(size, compressedSize, null, null);
    }

    /**
     * Creates an extra field based on all four possible values.
     *
     * @param size the entry's original size
     * @param compressedSize the entry's compressed size
     * @param relativeHeaderOffset the offset of the entry's local file
     * header record, may be null
     * @param diskStart the number of the disk the entry starts on, may
     * be null
     *
     * @throws IllegalArgumentException if size or compressedSize is null
     */
    public Zip64ExtendedInformationExtraField(ZipEightByteInteger size,
                                              ZipEightByteInteger compressedSize,
                                              ZipEightByteInteger relativeHeaderOffset,
                                              ZipLong diskStart) {
        this.size = size;
        this.compressedSize = compressedSize;
        this.relativeHeaderOffset = relativeHeaderOffset;
        this.diskStart = diskStart;
    }

    /** {@inheritDoc} */
    public ZipShort getHeaderId() {
        return HEADER_ID;
    }

    /** {@inheritDoc} */
    public ZipShort getLocalFileDataLength() {
        return new ZipShort(size != null ? 2 * DWORD : 0);
    }

    /** {@inheritDoc} */
    public ZipShort getCentralDirectoryLength() {
        return new ZipShort((size != null ? DWORD : 0)
                            + (compressedSize != null ? DWORD : 0)
                            + (relativeHeaderOffset != null ? DWORD : 0)
                            + (diskStart != null ? WORD : 0));
    }
    /** {@inheritDoc} */
    public byte[] getLocalFileDataData() {
        if (size != null || compressedSize != null) {
            if (size == null || compressedSize == null) {
                throw new IllegalArgumentException(LFH_MUST_HAVE_BOTH_SIZES_MSG);
            }
            byte[] data = new byte[2 * DWORD];
            addSizes(data);
            return data;
        }
        return EMPTY;
    }

    /** {@inheritDoc} */
    public byte[] getCentralDirectoryData() {
        byte[] data = new byte[getCentralDirectoryLength().getValue()];
        int off = addSizes(data);
        if (relativeHeaderOffset != null) {
            System.arraycopy(relativeHeaderOffset.getBytes(), 0, data, off, DWORD);
            off += DWORD;
        }
        if (diskStart != null) {
            System.arraycopy(diskStart.getBytes(), 0, data, off, WORD);
            off += WORD;
        }
        return data;
    }

    /** {@inheritDoc} */
    public void parseFromLocalFileData(byte[] buffer, int offset, int length)
        throws ZipException {
        if (length == 0) {
            // no local file data at all, may happen if an archive
            // only holds a ZIP64 extended information extra field
            // inside the central directory but not inside the local
            // file header
            return;
        }
        if (length < 2 * DWORD) {
            throw new ZipException(LFH_MUST_HAVE_BOTH_SIZES_MSG);
        }
        size = new ZipEightByteInteger(buffer, offset);
        offset += DWORD;
        compressedSize = new ZipEightByteInteger(buffer, offset);
        offset += DWORD;
        int remaining = length - 2 * DWORD;
        if (remaining >= DWORD) {
            relativeHeaderOffset = new ZipEightByteInteger(buffer, offset);
            offset += DWORD;
            remaining -= DWORD;
        }
        if (remaining >= WORD) {
            diskStart = new ZipLong(buffer, offset);
            offset += WORD;
            remaining -= WORD;
        }
    }

    /** {@inheritDoc} */
    public void parseFromCentralDirectoryData(byte[] buffer, int offset,
                                              int length)
        throws ZipException {
        // store for processing in reparseCentralDirectoryData
        rawCentralDirectoryData = new byte[length];
        System.arraycopy(buffer, offset, rawCentralDirectoryData, 0, length);

        // if there is no size information in here, we are screwed and
        // can only hope things will get resolved by LFH data later.
        // But there are some cases that can be detected:
        // * all data is there
        // * length == 24 -> both sizes and offset
        // * length % 8 == 4 -> at least we can identify the diskStart field
        if (length >= 3 * DWORD + WORD) {
            parseFromLocalFileData(buffer, offset, length);
        } else if (length == 3 * DWORD) {
            size = new ZipEightByteInteger(buffer, offset);
            offset += DWORD;
            compressedSize = new ZipEightByteInteger(buffer, offset);
            offset += DWORD;
            relativeHeaderOffset = new ZipEightByteInteger(buffer, offset);
        } else if (length % DWORD == WORD) {
            diskStart = new ZipLong(buffer, offset + length - WORD);
        }
    }

    /**
     * Parses the raw bytes read from the central directory extra
     * field with knowledge of which fields are expected to be there.
     *
     * <p>All four fields inside the zip64 extended information extra
     * field are optional and only present if their corresponding
     * entry inside the central directory contains the correct magic
     * value.</p>
     *
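     * <p>An illustrative call, not taken from the original documentation,
     * assuming {@code field} is an instance already populated via
     * {@link #parseFromCentralDirectoryData parseFromCentralDirectoryData}:
     * if the central directory entry stored 0xFFFFFFFF for both sizes and
     * for the local header offset, but a real disk number, the caller
     * would reparse like this:</p>
     *
     * <pre>{@code
     * field.reparseCentralDirectoryData(true, true, true, false);
     * ZipEightByteInteger uncompressed = field.getSize();
     * ZipEightByteInteger compressed = field.getCompressedSize();
     * ZipEightByteInteger offset = field.getRelativeHeaderOffset();
     * }</pre>
     *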
     * @param hasUncompressedSize flag to read from central directory
     * @param hasCompressedSize flag to read from central directory
     * @param hasRelativeHeaderOffset flag to read from central directory
     * @param hasDiskStart flag to read from central directory
     * @throws ZipException if the length of the stored data doesn't
     * match the expected fields
     */
    public void reparseCentralDirectoryData(boolean hasUncompressedSize,
                                            boolean hasCompressedSize,
                                            boolean hasRelativeHeaderOffset,
                                            boolean hasDiskStart)
        throws ZipException {
        if (rawCentralDirectoryData != null) {
            int expectedLength = (hasUncompressedSize ? DWORD : 0)
                + (hasCompressedSize ? DWORD : 0)
                + (hasRelativeHeaderOffset ? DWORD : 0)
                + (hasDiskStart ? WORD : 0);
            if (rawCentralDirectoryData.length != expectedLength) {
                throw new ZipException("central directory zip64 extended"
                                       + " information extra field's length"
                                       + " doesn't match central directory"
                                       + " data. Expected length "
                                       + expectedLength + " but is "
                                       + rawCentralDirectoryData.length);
            }
            int offset = 0;
            if (hasUncompressedSize) {
                size = new ZipEightByteInteger(rawCentralDirectoryData, offset);
                offset += DWORD;
            }
            if (hasCompressedSize) {
                compressedSize = new ZipEightByteInteger(rawCentralDirectoryData,
                                                         offset);
                offset += DWORD;
            }
            if (hasRelativeHeaderOffset) {
                relativeHeaderOffset =
                    new ZipEightByteInteger(rawCentralDirectoryData, offset);
                offset += DWORD;
            }
            if (hasDiskStart) {
                diskStart = new ZipLong(rawCentralDirectoryData, offset);
                offset += WORD;
            }
        }
    }

    /**
     * The uncompressed size stored in this extra field.
     */
    public ZipEightByteInteger getSize() {
        return size;
    }

    /**
     * The uncompressed size stored in this extra field.
     */
    public void setSize(ZipEightByteInteger size) {
        this.size = size;
    }

    /**
     * The compressed size stored in this extra field.
     */
    public ZipEightByteInteger getCompressedSize() {
        return compressedSize;
    }

    /**
     * The compressed size stored in this extra field.
     */
    public void setCompressedSize(ZipEightByteInteger compressedSize) {
        this.compressedSize = compressedSize;
    }

    /**
     * The relative header offset stored in this extra field.
     */
    public ZipEightByteInteger getRelativeHeaderOffset() {
        return relativeHeaderOffset;
    }

    /**
     * The relative header offset stored in this extra field.
     */
    public void setRelativeHeaderOffset(ZipEightByteInteger rho) {
        relativeHeaderOffset = rho;
    }

    /**
     * The disk start number stored in this extra field.
     */
    public ZipLong getDiskStartNumber() {
        return diskStart;
    }

    /**
     * The disk start number stored in this extra field.
     */
    public void setDiskStartNumber(ZipLong ds) {
        diskStart = ds;
    }

    private int addSizes(byte[] data) {
        int off = 0;
        if (size != null) {
            System.arraycopy(size.getBytes(), 0, data, 0, DWORD);
            off += DWORD;
        }
        if (compressedSize != null) {
            System.arraycopy(compressedSize.getBytes(), 0, data, off, DWORD);
            off += DWORD;
        }
        return off;
    }
}