/// <summary>
///   Writes the local file header for this entry into the output stream,
///   computing the header's offset (for the central directory and for
///   error-recovery seek-back) and handling split archives and the
///   zip64-as-necessary dummy extra-field block.
/// </summary>
/// <param name="s">the output stream (may be a CountingStream or ZipSegmentedStream)</param>
/// <param name="cycle">the write attempt number; 99 signals a zero-length entry from ZipOutputStream</param>
internal void WriteHeader(Stream s, int cycle)
{
    // Must remember the offset, within the output stream, of this particular
    // entry header.
    //
    // This is for 2 reasons:
    //
    //  1. so we can determine the RelativeOffsetOfLocalHeader (ROLH) for
    //     use in the central directory.
    //  2. so we can seek backward in case there is an error opening or reading
    //     the file, and the application decides to skip the file. In this case,
    //     we need to seek backward in the output stream to allow the next entry
    //     to be added to the zipfile output stream.
    //
    // Normally you would just store the offset before writing to the output
    // stream and be done with it. But the possibility to use split archives
    // makes this approach ineffective. In split archives, each file or segment
    // is bound to a max size limit, and each local file header must not span a
    // segment boundary; it must be written contiguously. If it will fit in the
    // current segment, then the ROLH is just the current Position in the output
    // stream. If it won't fit, then we need a new file (segment) and the ROLH
    // is zero.
    //
    // But we only can know if it is possible to write a header contiguously
    // after we know the size of the local header, a size that varies with
    // things like filename length, comments, and extra fields. We have to
    // compute the header fully before knowing whether it will fit.
    //
    // That takes care of item #1 above. Now, regarding #2. If an error occurs
    // while computing the local header, we want to just seek backward. The
    // exception handling logic (in the caller of WriteHeader) uses ROLH to
    // scroll back.
    //
    // All this means we have to preserve the starting offset before computing
    // the header, and also we have to compute the offset later, to handle the
    // case of split archives.

    var counter = s as CountingStream;

    // workitem 8098: ok (output)
    // This may change later, for split archives

    // Don't set _RelativeOffsetOfLocalHeader. Instead, set a temp variable.
    // This allows for re-streaming, where a zip entry might be read from a
    // zip archive (and maybe decrypted, and maybe decompressed) and then
    // written to another zip archive, with different settings for
    // compression method, compression level, or encryption algorithm.
    _future_ROLH = (counter != null)
        ? counter.ComputedPosition
        : s.Position;

    int j = 0, i = 0;

    byte[] block = new byte[30];

    // signature
    block[i++] = (byte)(ZipConstants.ZipEntrySignature & 0x000000FF);
    block[i++] = (byte)((ZipConstants.ZipEntrySignature & 0x0000FF00) >> 8);
    block[i++] = (byte)((ZipConstants.ZipEntrySignature & 0x00FF0000) >> 16);
    block[i++] = (byte)((ZipConstants.ZipEntrySignature & 0xFF000000) >> 24);

    // Design notes for ZIP64:
    //
    // The specification says that the header must include the Compressed
    // and Uncompressed sizes, as well as the CRC32 value. When creating
    // a zip via streamed processing, these quantities are not known until
    // after the compression is done. Thus, a typical way to do it is to
    // insert zeroes for these quantities, then do the compression, then
    // seek back to insert the appropriate values, then seek forward to
    // the end of the file data.
    //
    // There is also the option of using bit 3 in the GP bitfield - to
    // specify that there is a data descriptor after the file data
    // containing these three quantities.
    //
    // This works when the size of the quantities is known, either 32-bits
    // or 64 bits as with the ZIP64 extensions.
    //
    // With Zip64, the 4-byte fields are set to 0xffffffff, and there is a
    // corresponding data block in the "extra field" that contains the
    // actual Compressed, uncompressed sizes. (As well as an additional
    // field, the "Relative Offset of Local Header")
    //
    // The problem is when the app desires to use ZIP64 extensions
    // optionally, only when necessary. Suppose the library assumes no
    // zip64 extensions when writing the header, then after compression
    // finds that the size of the data requires zip64. At this point, the
    // header, already written to the file, won't have the necessary data
    // block in the "extra field". The size of the entry header is fixed,
    // so it is not possible to just "add on" the zip64 data block after
    // compressing the file. On the other hand, always using zip64 will
    // break interoperability with many other systems and apps.
    //
    // The approach we take is to insert a 32-byte dummy data block in the
    // extra field, whenever zip64 is to be used "as necessary". This data
    // block will get the actual zip64 HeaderId and zip64 metadata if
    // necessary. If not necessary, the data block will get a meaningless
    // HeaderId (0x1111), and will be filled with zeroes.
    //
    // When zip64 is actually in use, we also need to set the
    // VersionNeededToExtract field to 45.
    //
    // There is one additional wrinkle: using zip64 as necessary conflicts
    // with output to non-seekable devices. The header is emitted and
    // must indicate whether zip64 is in use, before we know if zip64 is
    // necessary. Because there is no seeking, the header can never be
    // changed. Therefore, on non-seekable devices,
    // Zip64Option.AsNecessary is the same as Zip64Option.Always.
    //
    // version needed- see AppNote.txt.
    //
    // need v5.1 for PKZIP strong encryption, or v2.0 for no encryption or
    // for PK encryption, 4.5 for zip64. We may reset this later, as
    // necessary or zip64.

    _presumeZip64 = (_container.Zip64 == Zip64Option.Always ||
                     (_container.Zip64 == Zip64Option.AsNecessary && !s.CanSeek));
    Int16 VersionNeededToExtract = (Int16)(_presumeZip64 ? 45 : 20);
#if BZIP
    if (this.CompressionMethod == Ionic.Zip.CompressionMethod.BZip2)
        VersionNeededToExtract = 46;
#endif

    // (i==4)
    block[i++] = (byte)(VersionNeededToExtract & 0x00FF);
    block[i++] = (byte)((VersionNeededToExtract & 0xFF00) >> 8);

    // Get byte array. Side effect: sets ActualEncoding.
    // Must determine encoding before setting the bitfield.
    // workitem 6513
    byte[] fileNameBytes = GetEncodedFileNameBytes();
    Int16 filenameLength = (Int16)fileNameBytes.Length;

    // general purpose bitfield
    // In the current implementation, this library uses only these bits
    // in the GP bitfield:
    //  bit 0 = if set, indicates the entry is encrypted
    //  bit 3 = if set, indicates the CRC, C and UC sizes follow the file data.
    //  bit 6 = strong encryption - for pkware's meaning of strong encryption
    //  bit 11 = UTF-8 encoding is used in the comment and filename

    // Here we set or unset the encryption bit.
    // _BitField may already be set, as with a ZipEntry added into ZipOutputStream, which
    // has bit 3 always set. We only want to set one bit
    if (_Encryption == EncryptionAlgorithm.None)
        _BitField &= ~1;  // encryption bit OFF
    else
        _BitField |= 1;   // encryption bit ON

    // workitem 7941: WinZip does not set the "strong encryption" bit when using AES.
    // This "Strong Encryption" is a PKWare Strong encryption thing.
    // _BitField |= 0x0020;

    // set the UTF8 bit if necessary
#if SILVERLIGHT
    if (_actualEncoding.WebName == "utf-8")
#else
    if (_actualEncoding.CodePage == System.Text.Encoding.UTF8.CodePage)
#endif
        _BitField |= 0x0800;

    // The PKZIP spec says that if bit 3 is set (0x0008) in the General
    // Purpose BitField, then the CRC, Compressed size, and uncompressed
    // size are written directly after the file data.
    //
    // These 3 quantities are normally present in the regular zip entry
    // header. But, they are not knowable until after the compression is
    // done. So, in the normal case, we
    //
    //  - write the header, using zeros for these quantities
    //  - compress the data, and incidentally compute these quantities.
    //  - seek back and write the correct values them into the header.
    //
    // This is nice because, while it is more complicated to write the zip
    // file, it is simpler and less error prone to read the zip file, and
    // as a result more applications can read zip files produced this way,
    // with those 3 quantities in the header.
    //
    // But if seeking in the output stream is not possible, then we need
    // to set the appropriate bitfield and emit these quantities after the
    // compressed file data in the output.
    //
    // workitem 7216 - having trouble formatting a zip64 file that is
    // readable by WinZip. not sure why! What I found is that setting
    // bit 3 and following all the implications, the zip64 file is
    // readable by WinZip 12. and Perl's IO::Compress::Zip . Perl takes
    // an interesting approach - it always sets bit 3 if ZIP64 in use.
    // DotNetZip now does the same; this gives better compatibility with
    // WinZip 12.

    if (IsDirectory || cycle == 99)
    {
        // (cycle == 99) indicates a zero-length entry written by ZipOutputStream

        _BitField &= ~0x0008;  // unset bit 3 - no "data descriptor" - ever
        _BitField &= ~0x0001;  // unset bit 0 - no encryption - ever
        Encryption = EncryptionAlgorithm.None;
        Password = null;
    }
    else if (!s.CanSeek)
        _BitField |= 0x0008;

#if DONT_GO_THERE
    else if (this.Encryption == EncryptionAlgorithm.PkzipWeak &&
             this._Source != ZipEntrySource.ZipFile)
    {
        // Set bit 3 to avoid the double-read perf issue.
        //
        // When PKZIP encryption is used, byte 11 of the encryption header is
        // used as a consistency check. It is normally set to the MSByte of the
        // CRC. But this means the CRC must be known before compression and
        // encryption, which means the entire stream has to be read twice. To
        // avoid that, the high-byte of the time blob (when in DOS format) can
        // be used for the consistency check (byte 11 in the encryption header).
        // But this means the entry must have bit 3 set.
        //
        // Previously I used a more complex arrangement - using the methods like
        // FigureCrc32(), PrepOutputStream() and others, in order to manage the
        // seek-back in the source stream. Why? Because bit 3 is not always
        // friendly with third-party zip tools, like those on the Mac.
        //
        // This is why this code is still ifdef'd out.
        //
        // Might consider making this yet another programmable option -
        // AlwaysUseBit3ForPkzip. But that's for another day.
        //
        _BitField |= 0x0008;
    }
#endif

    // (i==6)
    block[i++] = (byte)(_BitField & 0x00FF);
    block[i++] = (byte)((_BitField & 0xFF00) >> 8);

    // Here, we want to set values for Compressed Size, Uncompressed Size,
    // and CRC. If we have __FileDataPosition as not -1 (zero is a valid
    // FDP), then that means we are reading this zip entry from a zip
    // file, and we have good values for those quantities.
    //
    // If _FileDataPosition is -1, then we are constructing this Entry
    // from nothing. We zero those quantities now, and we will compute
    // actual values for the three quantities later, when we do the
    // compression, and then seek back to write them into the appropriate
    // place in the header.
    if (this.__FileDataPosition == -1)
    {
        //_UncompressedSize = 0; // do not unset - may need this value for restream
        // _Crc32 = 0;           // ditto
        _CompressedSize = 0;
        _crcCalculated = false;
    }

    // set compression method here
    MaybeUnsetCompressionMethodForWriting(cycle);

    // (i==8) compression method
    block[i++] = (byte)(_CompressionMethod & 0x00FF);
    block[i++] = (byte)((_CompressionMethod & 0xFF00) >> 8);

    if (cycle == 99)
    {
        // (cycle == 99) indicates a zero-length entry written by ZipOutputStream
        SetZip64Flags();
    }

#if AESCRYPTO
    else if (Encryption == EncryptionAlgorithm.WinZipAes128 || Encryption == EncryptionAlgorithm.WinZipAes256)
    {
        i -= 2;
        block[i++] = 0x63;
        block[i++] = 0;
    }
#endif

    // LastMod
    _TimeBlob = Crisis.Ionic.Zip.SharedUtilities.DateTimeToPacked(LastModified);

    // (i==10) time blob
    block[i++] = (byte)(_TimeBlob & 0x000000FF);
    block[i++] = (byte)((_TimeBlob & 0x0000FF00) >> 8);
    block[i++] = (byte)((_TimeBlob & 0x00FF0000) >> 16);
    block[i++] = (byte)((_TimeBlob & 0xFF000000) >> 24);

    // (i==14) CRC - if source==filesystem, this is zero now, actual value
    // will be calculated later.  if source==archive, this is a bonafide
    // value.
    block[i++] = (byte)(_Crc32 & 0x000000FF);
    block[i++] = (byte)((_Crc32 & 0x0000FF00) >> 8);
    block[i++] = (byte)((_Crc32 & 0x00FF0000) >> 16);
    block[i++] = (byte)((_Crc32 & 0xFF000000) >> 24);

    if (_presumeZip64)
    {
        // (i==18) CompressedSize (Int32) and UncompressedSize - all 0xFF for now
        for (j = 0; j < 8; j++)
            block[i++] = 0xFF;
    }
    else
    {
        // (i==18) CompressedSize (Int32) - this value may or may not be
        // bonafide.  if source == filesystem, then it is zero, and we'll
        // learn it after we compress.  if source == archive, then it is
        // bonafide data.
        block[i++] = (byte)(_CompressedSize & 0x000000FF);
        block[i++] = (byte)((_CompressedSize & 0x0000FF00) >> 8);
        block[i++] = (byte)((_CompressedSize & 0x00FF0000) >> 16);
        block[i++] = (byte)((_CompressedSize & 0xFF000000) >> 24);

        // (i==22) UncompressedSize (Int32) - this value may or may not be
        // bonafide.
        block[i++] = (byte)(_UncompressedSize & 0x000000FF);
        block[i++] = (byte)((_UncompressedSize & 0x0000FF00) >> 8);
        block[i++] = (byte)((_UncompressedSize & 0x00FF0000) >> 16);
        block[i++] = (byte)((_UncompressedSize & 0xFF000000) >> 24);
    }

    // (i==26) filename length (Int16)
    block[i++] = (byte)(filenameLength & 0x00FF);
    block[i++] = (byte)((filenameLength & 0xFF00) >> 8);

    _Extra = ConstructExtraField(false);

    // (i==28) extra field length (short)
    Int16 extraFieldLength = (Int16)((_Extra == null) ? 0 : _Extra.Length);
    block[i++] = (byte)(extraFieldLength & 0x00FF);
    block[i++] = (byte)((extraFieldLength & 0xFF00) >> 8);

    // workitem 13542
    byte[] bytes = new byte[i + filenameLength + extraFieldLength];

    // get the fixed portion
    Buffer.BlockCopy(block, 0, bytes, 0, i);
    //for (j = 0; j < i; j++) bytes[j] = block[j];

    // The filename written to the archive.
    Buffer.BlockCopy(fileNameBytes, 0, bytes, i, fileNameBytes.Length);
    // for (j = 0; j < fileNameBytes.Length; j++)
    //     bytes[i + j] = fileNameBytes[j];
    i += fileNameBytes.Length;

    // "Extra field"
    if (_Extra != null)
    {
        Buffer.BlockCopy(_Extra, 0, bytes, i, _Extra.Length);
        // for (j = 0; j < _Extra.Length; j++)
        //     bytes[i + j] = _Extra[j];
        i += _Extra.Length;
    }

    _LengthOfHeader = i;

    // handle split archives
    var zss = s as ZipSegmentedStream;
    if (zss != null)
    {
        zss.ContiguousWrite = true;
        UInt32 requiredSegment = zss.ComputeSegment(i);
        if (requiredSegment != zss.CurrentSegment)
            _future_ROLH = 0; // rollover!
        else
            _future_ROLH = zss.Position;

        _diskNumber = requiredSegment;
    }

    // validate the ZIP64 usage
    //
    // bugfix: validate against _future_ROLH - the offset at which THIS header
    // is being written - rather than _RelativeOffsetOfLocalHeader, which this
    // method deliberately does not set (see the note above) and which can hold
    // a stale value when re-streaming an entry. Also compare as a 64-bit
    // value: the previous (uint) cast truncated offsets of 4GB or more, which
    // wrapped to small values and silently skipped this check precisely when
    // the archive had grown too large for a non-zip64 format.
    if (_container.Zip64 == Zip64Option.Never && _future_ROLH >= 0xFFFFFFFF)
        throw new ZipException("Offset within the zip archive exceeds 0xFFFFFFFF. Consider setting the UseZip64WhenSaving property on the ZipFile instance.");

    // finally, write the header to the stream
    s.Write(bytes, 0, i);

    // now that the header is written, we can turn off the contiguous write restriction.
    if (zss != null)
        zss.ContiguousWrite = false;

    // Preserve this header data, we'll use it again later.
    // ..when seeking backward, to write again, after we have the Crc, compressed
    //   and uncompressed sizes.
    // ..and when writing the central directory structure.
    _EntryHeader = bytes;
}