/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.commons.compress.archivers.dump;

import org.apache.commons.compress.archivers.ArchiveException;
import org.apache.commons.compress.archivers.ArchiveInputStream;

import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

import java.util.Arrays;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Map;
import java.util.PriorityQueue;
import java.util.Queue;
import java.util.Stack;

/**
 * The DumpArchiveInputStream reads a UNIX dump archive as an InputStream.
 * Methods are provided to position at each successive entry in
 * the archive, and then read each entry as a normal input stream
 * using read().
 *
 * @NotThreadSafe
 */
public class DumpArchiveInputStream extends ArchiveInputStream {
    private DumpArchiveSummary summary;
    private DumpArchiveEntry active;
    private boolean isClosed;
    private boolean hasHitEOF;
    private long entrySize;
    private long entryOffset;
    private int readIdx;
    private final byte[] readBuf = new byte[DumpArchiveConstants.TP_SIZE];
    private byte[] blockBuffer;
    private int recordOffset;
    private long filepos;
    protected TapeInputStream raw;

    // map of ino -> dirent entry. We can use this to reconstruct full paths.
    private final Map<Integer, Dirent> names = new HashMap<Integer, Dirent>();

    // map of ino -> (directory) entry when we're missing one or more elements in the path.
    private final Map<Integer, DumpArchiveEntry> pending = new HashMap<Integer, DumpArchiveEntry>();

    // queue of (directory) entries where we now have the full path.
    private Queue<DumpArchiveEntry> queue;

    /**
     * Constructor.
     *
     * @param is the stream to read from
     * @throws ArchiveException if the stream could not be read or is not
     *         a recognizable dump archive
     */
    public DumpArchiveInputStream(InputStream is) throws ArchiveException {
        this.raw = new TapeInputStream(is);
        this.hasHitEOF = false;

        try {
            // read header, verify it's a dump archive.
            byte[] headerBytes = raw.readRecord();

            if (!DumpArchiveUtil.verify(headerBytes)) {
                throw new UnrecognizedFormatException();
            }

            // get summary information
            summary = new DumpArchiveSummary(headerBytes);

            // reset buffer with actual block size.
            raw.resetBlockSize(summary.getNTRec(), summary.isCompressed());

            // allocate our read buffer.
            blockBuffer = new byte[4 * DumpArchiveConstants.TP_SIZE];

            // skip past CLRI and BITS segments since we don't handle them yet.
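            // (in the dump(8) format the summary record is followed by a
            // CLRI segment, a bitmap of inodes deleted since the previous
            // dump, and a BITS segment, a bitmap of the inodes contained
            // in this dump, before the inode records themselves start.)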
            readCLRI();
            readBITS();
        } catch (IOException ex) {
            throw new ArchiveException(ex.getMessage(), ex);
        }

        // put in a dummy record for the root node.
        Dirent root = new Dirent(2, 2, 4, ".");
        names.put(Integer.valueOf(2), root);

        // use a priority queue to ensure parent directories are
        // released first.
        queue = new PriorityQueue<DumpArchiveEntry>(10,
            new Comparator<DumpArchiveEntry>() {
                public int compare(DumpArchiveEntry p, DumpArchiveEntry q) {
                    if ((p.getOriginalName() == null) || (q.getOriginalName() == null)) {
                        return Integer.MAX_VALUE;
                    }

                    return p.getOriginalName().compareTo(q.getOriginalName());
                }
            });
    }

    @Deprecated
    @Override
    public int getCount() {
        return (int) getBytesRead();
    }

    @Override
    public long getBytesRead() {
        return raw.getBytesRead();
    }

    /**
     * Return the archive summary information.
     *
     * @return the summary record read from the start of the archive
     */
    public DumpArchiveSummary getSummary() {
        return summary;
    }

    /**
     * Read CLRI (deleted inode) segment.
     */
    private void readCLRI() throws IOException {
        byte[] readBuf = raw.readRecord();

        if (!DumpArchiveUtil.verify(readBuf)) {
            throw new InvalidFormatException();
        }

        active = DumpArchiveEntry.parse(readBuf);

        if (DumpArchiveConstants.SEGMENT_TYPE.CLRI != active.getHeaderType()) {
            throw new InvalidFormatException();
        }

        // we don't do anything with this yet.
        if (raw.skip(DumpArchiveConstants.TP_SIZE * active.getHeaderCount()) == -1) {
            throw new EOFException();
        }

        readIdx = active.getHeaderCount();
    }

    /**
     * Read BITS segment.
     */
    private void readBITS() throws IOException {
        byte[] readBuf = raw.readRecord();

        if (!DumpArchiveUtil.verify(readBuf)) {
            throw new InvalidFormatException();
        }

        active = DumpArchiveEntry.parse(readBuf);

        if (DumpArchiveConstants.SEGMENT_TYPE.BITS != active.getHeaderType()) {
            throw new InvalidFormatException();
        }

        // we don't do anything with this yet.
        if (raw.skip(DumpArchiveConstants.TP_SIZE * active.getHeaderCount()) == -1) {
            throw new EOFException();
        }

        readIdx = active.getHeaderCount();
    }

    /**
     * Read the next entry.
     */
    public DumpArchiveEntry getNextDumpEntry() throws IOException {
        return getNextEntry();
    }

    /**
     * Read the next entry.
     */
    @Override
    public DumpArchiveEntry getNextEntry() throws IOException {
        DumpArchiveEntry entry = null;
        String path = null;

        // is there anything in the queue?
        if (!queue.isEmpty()) {
            return queue.remove();
        }

        while (entry == null) {
            if (hasHitEOF) {
                return null;
            }

            // skip any remaining records in this segment for prior file.
            // we might still have holes... easiest to do it
            // block by block. We may want to revisit this if
            // the unnecessary decompression time adds up.
            while (readIdx < active.getHeaderCount()) {
                if (!active.isSparseRecord(readIdx++)
                    && raw.skip(DumpArchiveConstants.TP_SIZE) == -1) {
                    throw new EOFException();
                }
            }

            readIdx = 0;
            filepos = raw.getBytesRead();

            byte[] headerBytes = raw.readRecord();

            if (!DumpArchiveUtil.verify(headerBytes)) {
                throw new InvalidFormatException();
            }

            active = DumpArchiveEntry.parse(headerBytes);

            // skip any remaining segments for prior file.
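            // (an ADDR header marks a continuation of the previous file's
            // data; headerCount minus headerHoles is the number of records
            // physically present, the holes being sparse records that were
            // never written out.)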
            while (DumpArchiveConstants.SEGMENT_TYPE.ADDR == active.getHeaderType()) {
                if (raw.skip(DumpArchiveConstants.TP_SIZE
                        * (active.getHeaderCount()
                        - active.getHeaderHoles())) == -1) {
                    throw new EOFException();
                }

                filepos = raw.getBytesRead();
                headerBytes = raw.readRecord();

                if (!DumpArchiveUtil.verify(headerBytes)) {
                    throw new InvalidFormatException();
                }

                active = DumpArchiveEntry.parse(headerBytes);
            }

            // check if this is an end-of-volume marker.
            if (DumpArchiveConstants.SEGMENT_TYPE.END == active.getHeaderType()) {
                hasHitEOF = true;

                return null;
            }

            entry = active;

            if (entry.isDirectory()) {
                readDirectoryEntry(active);

                // now we create an empty InputStream.
                entryOffset = 0;
                entrySize = 0;
                readIdx = active.getHeaderCount();
            } else {
                entryOffset = 0;
                entrySize = active.getEntrySize();
                readIdx = 0;
            }

            recordOffset = readBuf.length;

            path = getPath(entry);

            if (path == null) {
                entry = null;
            }
        }

        entry.setName(path);
        entry.setSimpleName(names.get(Integer.valueOf(entry.getIno())).getName());
        entry.setOffset(filepos);

        return entry;
    }

    /**
     * Read directory entry.
     */
    private void readDirectoryEntry(DumpArchiveEntry entry)
        throws IOException {
        long size = entry.getEntrySize();
        boolean first = true;

        while (first ||
                (DumpArchiveConstants.SEGMENT_TYPE.ADDR == entry.getHeaderType())) {
            // read the header that we just peeked at.
            if (!first) {
                raw.readRecord();
            }

            if (!names.containsKey(Integer.valueOf(entry.getIno())) &&
                    (DumpArchiveConstants.SEGMENT_TYPE.INODE == entry.getHeaderType())) {
                pending.put(Integer.valueOf(entry.getIno()), entry);
            }

            int datalen = DumpArchiveConstants.TP_SIZE * entry.getHeaderCount();

            if (blockBuffer.length < datalen) {
                blockBuffer = new byte[datalen];
            }

            if (raw.read(blockBuffer, 0, datalen) != datalen) {
                throw new EOFException();
            }

            int reclen = 0;

            for (int i = 0; (i < (datalen - 8)) && (i < (size - 8));
                    i += reclen) {
                int ino = DumpArchiveUtil.convert32(blockBuffer, i);
                reclen = DumpArchiveUtil.convert16(blockBuffer, i + 4);

                // guard against corrupt archives: a zero record length
                // would otherwise loop forever.
                if (reclen == 0) {
                    throw new InvalidFormatException();
                }

                byte type = blockBuffer[i + 6];

                String name = new String(blockBuffer, i + 8, blockBuffer[i + 7]); // TODO default charset?

                if (".".equals(name) || "..".equals(name)) {
                    // do nothing...
                    continue;
                }

                Dirent d = new Dirent(ino, entry.getIno(), type, name);

                /*
                if ((type == 4) && names.containsKey(ino)) {
                    System.out.println("we already have ino: " +
                                       names.get(ino));
                }
                */

                names.put(Integer.valueOf(ino), d);

                // check whether this allows us to fill anything in the pending list.
                for (Map.Entry<Integer, DumpArchiveEntry> e : pending.entrySet()) {
                    String path = getPath(e.getValue());

                    if (path != null) {
                        e.getValue().setName(path);
                        e.getValue().setSimpleName(names.get(e.getKey()).getName());
                        queue.add(e.getValue());
                    }
                }

                // remove anything that we found. (We can't do it earlier
                // because of concurrent modification exceptions.)
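                // (the queued entries already have complete paths; they
                // will be handed out by getNextEntry() before any further
                // records are read.)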
                for (DumpArchiveEntry e : queue) {
                    pending.remove(Integer.valueOf(e.getIno()));
                }
            }

            byte[] peekBytes = raw.peek();

            if (!DumpArchiveUtil.verify(peekBytes)) {
                throw new InvalidFormatException();
            }

            entry = DumpArchiveEntry.parse(peekBytes);
            first = false;
            size -= DumpArchiveConstants.TP_SIZE;
        }
    }

    /**
     * Get full path for specified archive entry, or null if there's a gap.
     *
     * @param entry the entry to resolve
     * @return full path for specified archive entry, or null if there's a gap.
     */
    private String getPath(DumpArchiveEntry entry) {
        // build the stack of elements. It's possible that we're
        // still missing an intermediate value and if so we
        // defer the entry until the gap has been filled in.
        Stack<String> elements = new Stack<String>();
        Dirent dirent = null;

        for (int i = entry.getIno();; i = dirent.getParentIno()) {
            if (!names.containsKey(Integer.valueOf(i))) {
                elements.clear();
                break;
            }

            dirent = names.get(Integer.valueOf(i));
            elements.push(dirent.getName());

            if (dirent.getIno() == dirent.getParentIno()) {
                break;
            }
        }

        // if an element is missing defer the work and read next entry.
        if (elements.isEmpty()) {
            pending.put(Integer.valueOf(entry.getIno()), entry);

            return null;
        }

        // generate full path from stack of elements.
        StringBuilder sb = new StringBuilder(elements.pop());

        while (!elements.isEmpty()) {
            sb.append('/');
            sb.append(elements.pop());
        }

        return sb.toString();
    }

    /**
     * Reads bytes from the current dump archive entry.
     *
     * This method is aware of the boundaries of the current
     * entry in the archive and will deal with them as if they
     * were this stream's start and EOF.
     *
     * @param buf The buffer into which to place bytes read.
     * @param off The offset at which to place bytes read.
     * @param len The number of bytes to read.
     * @return The number of bytes read, or -1 at EOF.
     * @throws IOException on error
     */
    @Override
    public int read(byte[] buf, int off, int len) throws IOException {
        int totalRead = 0;

        if (hasHitEOF || isClosed || (entryOffset >= entrySize)) {
            return -1;
        }

        if ((len + entryOffset) > entrySize) {
            len = (int) (entrySize - entryOffset);
        }

        while (len > 0) {
            int sz = (len > (readBuf.length - recordOffset))
                ? (readBuf.length - recordOffset) : len;

            // copy any data we have
            if ((recordOffset + sz) <= readBuf.length) {
                System.arraycopy(readBuf, recordOffset, buf, off, sz);
                totalRead += sz;
                recordOffset += sz;
                len -= sz;
                off += sz;
            }

            // load next block if necessary.
            if (len > 0) {
                // a segment header describes at most 512 records.
                if (readIdx >= 512) {
                    byte[] headerBytes = raw.readRecord();

                    if (!DumpArchiveUtil.verify(headerBytes)) {
                        throw new InvalidFormatException();
                    }

                    active = DumpArchiveEntry.parse(headerBytes);
                    readIdx = 0;
                }

                if (!active.isSparseRecord(readIdx++)) {
                    int r = raw.read(readBuf, 0, readBuf.length);
                    if (r != readBuf.length) {
                        throw new EOFException();
                    }
                } else {
                    Arrays.fill(readBuf, (byte) 0);
                }

                recordOffset = 0;
            }
        }

        entryOffset += totalRead;

        return totalRead;
    }

    /**
     * Closes the stream for this entry.
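     *
     * Note that this also closes the underlying archive stream, so the
     * whole archive becomes unusable afterwards.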
     */
    @Override
    public void close() throws IOException {
        if (!isClosed) {
            isClosed = true;
            raw.close();
        }
    }

    /**
     * Look at the first few bytes of the file to decide if it's a dump
     * archive. With 32 bytes we can look at the magic value, with a full
     * 1k we can verify the checksum.
     *
     * @param buffer data to match
     * @param length length of the data
     * @return whether the buffer appears to contain a dump archive
     */
    public static boolean matches(byte[] buffer, int length) {
        // do we have enough of the header?
        if (length < 32) {
            return false;
        }

        // this is the best test
        if (length >= DumpArchiveConstants.TP_SIZE) {
            return DumpArchiveUtil.verify(buffer);
        }

        // this will work in a pinch.
        return DumpArchiveConstants.NFS_MAGIC == DumpArchiveUtil.convert32(buffer, 24);
    }

}
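// A minimal usage sketch (the file name is illustrative only):
//
//     InputStream is = new FileInputStream("archive.dump");
//     DumpArchiveInputStream dump = new DumpArchiveInputStream(is);
//     DumpArchiveEntry entry;
//     while ((entry = dump.getNextDumpEntry()) != null) {
//         // read() now returns this entry's data, stopping at the
//         // entry boundary.
//         System.out.println(entry.getName());
//     }
//     dump.close();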