module datapak;

public import vfile;
import bindbc.zstandard.zstd;
import zlib = etc.c.zlib;
import std.digest;
import std.digest.murmurhash;
import std.digest.md;
import std.digest.sha;
import std.digest.ripemd;
import std.digest.crc;
import std.bitmanip;
import std.stdio;
import std.string : toStringz, fromStringz;

/**
 * DataPak (*.dpk) is mainly intended as a compression method for application assets. Technically it can store folder info,
 * but the filename field has a fixed, relatively short length to save some extra space. There is some support for expanding
 * the path, but it is currently unimplemented.
 * 
 * General layout of a file:
 * <ul>
 * <li>DataPak signature</li>
 * <li>Header</li>
 * <li>Extension area. Ignored by default; handling must be implemented by whoever wants to use it. Can be compressed alongside the main
 * data to save some space (not recommended if it's needed for compression, e.g. dictionaries), but only if the index field is also compressed.</li>
 * <li>Array of DataPak indexes. Each entry can have some extension. Can be compressed alongside the main data to save some space.</li>
 * <li>CRC32 checksum at the beginning of the compressed block, or at the end of the file information description table.</li>
 * <li>Data</li>
 * </ul>
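 *
 * A minimal reading sketch (not part of the original documentation; the archive name and the use of zstandard are
 * assumptions for illustration):
 * ---
 * DataPak.loadZSTD();                          //load the zstandard binding before touching ZSTD-compressed archives
 * DataPak pak = new DataPak("assets.dpk");     //reads the signature, header, and index table
 * for (DataPak.Index idx = pak.getNextIndex() ; idx != DataPak.Index.init ; idx = pak.getNextIndex()) {
 *     ubyte[] data = pak.getNextAsArray();     //decompresses the next file and advances the internal counter
 *     //use idx.filename and data here
 * }
 * ---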
 */
public class DataPak{
	///Every DataPak file begins with this.
	///The dot will be replaced with numbers if I ever decide to make any upgrades to the format
	static enum char[8] SIGNATURE = "DataPak.";
	/**
	 * Default compression methods for the file.
	 */
	enum CompressionMethod : char[8]{
		uncompressed		=	"UNCMPRSD",
		deflate				=	"ZLIB    ",
		zstandard			=	"ZSTD    ",
		//the following algorithms are not yet implemented, but probably will be
		lz4					=	"LZ4     "
	}
	/**
	 * Selects between checksum/hash types.
	 * Note that the checksum shares the 112-byte `field` of each Index with the filename, so the more bytes the checksum
	 * needs, the fewer are left for the filename (e.g. sha512 takes 64 bytes, leaving 48 for the name).
	 * Values between 32 and 63 can be used for custom implementations.
	 */
	public enum ChecksumType : ubyte{
		none				=	0,
		ripeMD				=	1,
		murmurhash32_32		=	2,
		murmurhash128_32	=	3,
		murmurhash128_64	=	4,
		sha224				=	5,
		sha256				=	6,
		sha384				=	7,
		sha512				=	8,
		sha512_224			=	9,
		sha512_256			=	10,
		md5					=	11,
		crc32				=	12,
		crc64ISO			=	13,
		crc64ECMA			=	14
	}
	/**
	 * Stores the length in bytes of each checksum result, indexed by ChecksumType.
	 */
	package static immutable ubyte[15] CHECKSUM_LENGTH = [0, 20, 4, 16, 16, 28, 32, 48, 64, 28, 32, 16, 4, 8, 8];
	/**
	 * Stores important information about the file.
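	 *
	 * A sketch of filling one in for a new archive (the field choices below are illustrative, not defaults):
	 * ---
	 * DataPak.Header h;
	 * h.compMethod = DataPak.CompressionMethod.zstandard;
	 * h.checksumType = DataPak.ChecksumType.crc32;
	 * h.compLevel = 9;
	 * ---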
	 */
	public struct Header{
	align(1):
		ulong		indexSize;		///Size of the index field in bytes, including the extension fields
		ulong		decompSize;		///Total decompressed size
		char[8]		compMethod;		///Compression method stored as a string
		uint		extFieldSize;	///Extension area size
		uint		numOfIndexes;	///Total number of file indexes
		mixin(bitfields!(
			bool, "compIndex", 		1,	///If high, the indexes will be compressed
			bool, "compExtField", 	1,	///If high, the extension field will be compressed
			ubyte, "checksumType",	6,	///Type of checksum for the files
			ubyte, "compLevel",		6,	///Compression level if needed
			uint, "",				18,	///Reserved for future use
		));
		//uint		padding;
	}
	/**
	 * Index representing data for a file.
	 */
	public struct Index{
	align(1):
		ulong 		offset;			///Points to where the file begins in the decompressed stream
		ushort		extFieldSize;	///Extension area size for the index
		ushort		sizeH;			///The upper 16 bits of the file's size
		uint		sizeL;			///The lower 32 bits of the file's size
		char[112]	field;			///Name of the file terminated with a 0xFF (char.init) character + checksum at the end
		///Returns the filename as a string
		public @property string filename() @safe pure nothrow{
			string result;
			size_t pos;
			while(field.length > pos && field[pos] != 0xFF){
				result ~= field[pos];
				pos++;
			}
			return result;
		}
		///Sets the filename
		public @property string filename(string val) @safe @nogc pure nothrow{
			foreach(i , c ; val){
				if(i >= field.length) break;	//do not write past the end of the field
				field[i] = c;
			}
			return val;
		}
		///Returns the checksum/hash
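		///A sketch of reading it back (`index` is an Index instance; N must match CHECKSUM_LENGTH for the archive's
		///checksum type, so N = 4 assumes crc32):
		///---
		///const ubyte[4] crc = index.checksum!4();
		///---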
		public ubyte[N] checksum(int N = 16)() @safe @nogc pure nothrow{
			ubyte[N] result;
			for(int i ; i < N ; i++){
				result[i] = field[field.length - N + i];
			}
			return result;
		}
		///Sets the checksum/hash
		public ubyte[N] checksum(int N = 16)(ubyte[N] val) @safe @nogc pure nothrow{
			for(int i ; i < N ; i++){
				field[field.length - N + i] = val[i];
			}
			return val;
		}
	}
	protected Header header;
	protected File file;
	protected Index[] indexes;
	protected string[] paths;			///Only used during compression
	protected uint nextIndex;
	protected ubyte[] extField;
	protected ubyte[][uint] indexExtFields;
	protected bool readOnly, createNew;
	protected void* compStream;
	protected zlib.z_stream deflateStream;
	protected ubyte[] readBuf, compBuf;
	protected ZSTD_inBuffer inBuff;
	protected ZSTD_outBuffer outBuff;
	protected size_t prevOutPos;		///0 if no data is left over from a previous decompression
	protected ulong compPos, compPos0;	///Current position of data; position of all currently decompressed data

	public pure void delegate(size_t pos) progress;	///Called to inform the host of decompression progress (will be replaced with something more useful in the future)

	//Configuration area; might be replaced with per-instance values instead of static ones.
	public static bool enableHeaderChecksumError = true;	///If false, no exception is thrown on a header/index checksum mismatch
	public static bool enableFileChecksumError = true;		///If false, no exception is thrown on a file checksum mismatch and the check is skipped (useful when dealing with complex hash algorithms not designed for quick checksums)
	public static size_t readBufferSize = 32 * 1024; ///Sets the read buffer size of all instances (default is 32 KiB)
	/**
	 * Loads a DataPak file from disk for reading.
	 */
	public this(string filename){
		this(File(filename));
		readOnly = true;
	}
	///Ditto
	public this(File f){
		CRC32 chkSmCalc = CRC32();
		ubyte[4] crc;
		char[] signature;
		signature.length = SIGNATURE.length;
		signature = f.rawRead(signature);
		//check for correct file signature
		//if(signature != SIGNATURE)
		foreach(i ,c ; signature)
			if(SIGNATURE[i] != c)
				throw new Exception("File isn't DataPak file");
		chkSmCalc.put(reinterpretCastA!ubyte(signature));
		readBuf.length = Header.sizeof;
		readBuf = f.rawRead(readBuf);
		header = reinterpretGet!Header(readBuf);
		//ubyte[4] chkSm = header.checksum;
		//header.checksum = [0x0, 0x0, 0x0, 0x0];
		//readBuf.length -= 4;
		//readBuf.length += 4;
		file = f;
		chkSmCalc.put(readBuf);
		initDecomp;
		if(header.extFieldSize){
			if(!header.compExtField){
				extField.length = header.extFieldSize;
				//f.rawRead(extField);
				extField = f.rawRead(extField);
			}else{

				f.rawRead(crc);
				extField = decompressFromFile(header.extFieldSize);
			}
			chkSmCalc.put(extField);
		}
		if(!header.compIndex){
			indexes.length = header.numOfIndexes;
			readBuf.length = Index.sizeof;
			for(int i; i < indexes.length; i++){
				//fread(readBuf.ptr, readBuf.length, 1, f);
				readBuf = f.rawRead(readBuf);
				indexes[i] = reinterpretGet!Index(readBuf);
				chkSmCalc.put(readBuf);
				if(indexes[i].extFieldSize){
					readBuf.length = indexes[i].extFieldSize;
					readBuf = f.rawRead(readBuf);
					chkSmCalc.put(readBuf);
					indexExtFields[i] = readBuf.dup;	//duplicate before the buffer is resized back to Index.sizeof
					readBuf.length = Index.sizeof;
				}
			}
			f.rawRead(crc);
		}else{
			if(!header.compExtField)
				f.rawRead(crc);
			ubyte[] temp = decompressFromFile(header.indexSize);
			ubyte* tempPtr = temp.ptr;
			chkSmCalc.put(temp);
			indexes.length = header.numOfIndexes;	//the index array must be allocated in this branch too
			for(int i; i < indexes.length; i++){
				indexes[i] = *(cast(Index*)(cast(void*)tempPtr));
				tempPtr += Index.sizeof;
				if(indexes[i].extFieldSize){
					indexExtFields[i] = tempPtr[0..indexes[i].extFieldSize].dup;
					tempPtr += indexes[i].extFieldSize;
				}
			}
		}

		const ubyte[4] checksum = chkSmCalc.finish();
		if(crc != checksum && enableHeaderChecksumError){
			throw new BadChecksumException("CRC32 error in header/index");
		}

	}
	/**
	 * Creates a DataPak file from scratch.
	 */
	this(Header header, string targetName, ubyte[] extField = []){
		file = File(targetName, "wb");
		this.header = header;
		this.extField = extField;	//store the extension area so finalize() can write it
		createNew = true;
		initComp;
	}
	~this(){
		//deinitialize compression
		switch(header.compMethod){
			case CompressionMethod.zstandard:
				if(createNew){
					ZSTD_freeCStream(cast(ZSTD_CStream*)compStream);
				}else{
					ZSTD_freeDStream(cast(ZSTD_DStream*)compStream);
				}
				break;
			case CompressionMethod.deflate:
				if(createNew){
					zlib.deflateEnd(&deflateStream);
				}else{
					zlib.inflateEnd(&deflateStream);
				}
				break;
			default:
				break;
		}
	}
	/**
	 * Adds a file to be compressed later.
	 * Returns the created index for it.
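	 *
	 * A sketch of building an archive (paths and settings are illustrative):
	 * ---
	 * DataPak.loadZSTD();
	 * DataPak.Header h;
	 * h.compMethod = DataPak.CompressionMethod.zstandard;
	 * h.checksumType = DataPak.ChecksumType.crc32;
	 * h.compLevel = 9;
	 * DataPak pak = new DataPak(h, "assets.dpk");
	 * pak.addFile("sprites/player.png", "player.png");	//store under a shorter name
	 * pak.addFile("music/theme.ogg");
	 * pak.finalize();	//writes the header and indexes, then compresses the added files
	 * ---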
	 */
	public Index addFile(string filename, string newName = null, ubyte[] indexExtField = []){
		Index result;
		if(!newName.length){
			newName = filename;
		}
		result.filename = newName;

		ubyte[] buffer;
		buffer.length = 32*1024;
		File f = File(filename);
		result.sizeL = cast(uint)(f.size);
		result.sizeH = cast(ushort)(f.size>>32);
		//Calculate checksums if needed
		size_t remain = cast(size_t)f.size;
		ubyte[N] _generateChecksum(CHKSM, size_t N)(CHKSM checksum){
			while(buffer.length){
				buffer = f.rawRead(buffer);
				checksum.put(buffer);
			}
			//checksum.finalize();
			return checksum.finish();
		}
		switch(header.checksumType){
			case ChecksumType.crc32:
				result.checksum(_generateChecksum!(CRC32, 4)(CRC32()));
				break;
			case ChecksumType.crc64ECMA:
				result.checksum(_generateChecksum!(CRC64ECMA, 8)(CRC64ECMA()));
				break;
			case ChecksumType.crc64ISO:
				result.checksum(_generateChecksum!(CRC64ISO, 8)(CRC64ISO()));
				break;
			case ChecksumType.md5:
				result.checksum(_generateChecksum!(MD5, 16)(MD5()));
				break;
			case ChecksumType.ripeMD:
				result.checksum(_generateChecksum!(RIPEMD160, 20)(RIPEMD160()));
				break;
			case ChecksumType.sha224:
				result.checksum(_generateChecksum!(SHA224, 28)(SHA224()));
				break;
			case ChecksumType.sha256:
				result.checksum(_generateChecksum!(SHA256, 32)(SHA256()));
				break;
			case ChecksumType.sha384:
				result.checksum(_generateChecksum!(SHA384, 48)(SHA384()));
				break;
			case ChecksumType.sha512:
				result.checksum(_generateChecksum!(SHA512, 64)(SHA512()));
				break;
			case ChecksumType.sha512_224:
				result.checksum(_generateChecksum!(SHA512_224, 28)(SHA512_224()));
				break;
			case ChecksumType.sha512_256:
				result.checksum(_generateChecksum!(SHA512_256, 32)(SHA512_256()));
				break;
			case ChecksumType.murmurhash32_32:
				result.checksum(_generateChecksum!(MurmurHash3!(32, 32), 4)(MurmurHash3!(32, 32)(0x66_69_6c_65)));
				break;
			case ChecksumType.murmurhash128_32:
				result.checksum(_generateChecksum!(MurmurHash3!(128, 32), 16)(MurmurHash3!(128, 32)(0x66_69_6c_65)));
				break;
			case ChecksumType.murmurhash128_64:
				result.checksum(_generateChecksum!(MurmurHash3!(128, 64), 16)(MurmurHash3!(128, 64)(0x66_69_6c_65_66_69_6c_65L)));
				break;
			default:
				break;
		}
		result.offset = compPos;
		compPos += f.size;
		result.extFieldSize = cast(ushort)indexExtField.length;
		if(indexExtField.length)
			indexExtFields[cast(uint)indexes.length] = indexExtField;
		indexes ~= result;
		paths ~= filename;	//remember the source path, since the stored name can differ from it
		header.decompSize += f.size;
		header.indexSize += Index.sizeof + indexExtField.length;
		header.numOfIndexes = cast(uint)indexes.length;
		return result;
	}
	/**
	 * Initializes compression.
	 */
	protected void initComp(){
		if(compStream) return;
		switch(header.compMethod){
			case CompressionMethod.uncompressed:
				break;
			case CompressionMethod.zstandard:
				compStream = ZSTD_createCStream();
				//const size_t result = ZSTD_initCStream(cast(ZSTD_CStream*)compStream, header.compLevel);
				readBuf.length = readBufferSize;
				ZSTD_CCtx_reset(cast(ZSTD_CStream*)compStream, ZSTD_ResetDirective.ZSTD_reset_session_only);
				ZSTD_CCtx_setParameter(cast(ZSTD_CStream*)compStream, ZSTD_cParameter.ZSTD_c_compressionLevel, header.compLevel);
				inBuff = ZSTD_inBuffer(readBuf.ptr, readBuf.length, 0);
				//compBuf.length = ZSTD_CStreamOutSize();
				compBuf.length = readBufferSize;
				outBuff = ZSTD_outBuffer(compBuf.ptr, compBuf.length, 0);
				break;
			case CompressionMethod.deflate:
				if(zlib.Z_OK != zlib.deflateInit(&deflateStream, header.compLevel))
					throw new CompressionException("Failed to initialize deflate.");
				readBuf.length = readBufferSize;
				deflateStream.avail_in = cast(uint)readBuf.length;
				deflateStream.next_in = readBuf.ptr;
				compBuf.length = readBufferSize;
				deflateStream.avail_out = cast(uint)compBuf.length;
				deflateStream.next_out = compBuf.ptr;
				break;
			default:
				throw new Exception("Unknown compression method");
		}
	}
	/**
	 * Initializes decompression.
	 */
	protected void initDecomp(){
		if(compStream) return;
		switch(header.compMethod){
			case CompressionMethod.uncompressed:
				break;
			case CompressionMethod.zstandard:
				compStream = ZSTD_createDStream();
				ZSTD_initDStream(cast(ZSTD_DStream*)compStream);
				//writeln(result);
				readBuf.length = readBufferSize;
				inBuff = ZSTD_inBuffer(readBuf.ptr, readBuf.length, 0);
				//compBuf.length = ZSTD_DStreamOutSize();
				//outBuff = ZSTD_outBuffer(compBuf.ptr, compBuf.length, 0);
				break;
			case CompressionMethod.deflate:
				if(zlib.Z_OK != zlib.inflateInit(&deflateStream))
					throw new CompressionException("Failed to initialize deflate");
				readBuf.length = readBufferSize;
				deflateStream.next_in = readBuf.ptr;
				deflateStream.avail_in = cast(uint)readBuf.length;
				break;
			default:
				throw new Exception("Unknown compression method");
		}
	}
	/**
	 * Returns a given index.
	 */
	public Index getIndex(uint i){
		if(i >= indexes.length)
			return Index.init;
		else
			return indexes[i];
	}
	/**
	 * Returns the index of the next file.
	 */
	public Index getNextIndex(){
		if(nextIndex >= indexes.length)
			return Index.init;
		else
			return indexes[nextIndex];
	}
	/**
	 * Returns the next file as a VFile.
	 */
	/+public VFile getNextAsVFile(){
		VFile result = VFile.__ctor!ubyte(getNextAsArray);
		return result;
	}+/
	/**
	 * Returns the next file as a ubyte[] array.
	 */
	public ubyte[] getNextAsArray(){
		if(nextIndex >= indexes.length)
			return [];
		static if(size_t.sizeof == 4)
			ubyte[] result = decompressFromFile(indexes[nextIndex].sizeL);
		else{
			ubyte[] result = decompressFromFile(cast(ulong)indexes[nextIndex].sizeL | cast(ulong)indexes[nextIndex].sizeH<<32);
		}

		nextIndex++;
		return result;
	}
	/**
	 * Checks the integrity of a file against a hash or checksum.
	 */
	protected bool checkFile(ubyte[] data, ubyte[] checksum){
		bool _checkFile (CHKSM)(CHKSM chkCalc) {
			chkCalc.put(data);
			immutable auto result = chkCalc.finish;
			return result == checksum;
		}
		switch(header.checksumType){
			case ChecksumType.ripeMD:
				return _checkFile(RIPEMD160());
			case ChecksumType.md5:
				return _checkFile(MD5());
			case ChecksumType.crc32:
				return _checkFile(CRC32());
			case ChecksumType.crc64ISO:
				return _checkFile(CRC64ISO());
			case ChecksumType.crc64ECMA:
				return _checkFile(CRC64ECMA());
			case ChecksumType.sha224:
				return _checkFile(SHA224());
			case ChecksumType.sha256:
				return _checkFile(SHA256());
			case ChecksumType.sha384:
				return _checkFile(SHA384());
			case ChecksumType.sha512:
				return _checkFile(SHA512());
			case ChecksumType.sha512_224:
				return _checkFile(SHA512_224());
			case ChecksumType.sha512_256:
				return _checkFile(SHA512_256());
			case ChecksumType.murmurhash32_32:
				return _checkFile(MurmurHash3!(32,32)(0x66_69_6c_65));
			case ChecksumType.murmurhash128_32:
				return _checkFile(MurmurHash3!(128,32)(0x66_69_6c_65));
			case ChecksumType.murmurhash128_64:
				return _checkFile(MurmurHash3!(128,64)(0x66_69_6c_65_66_69_6c_65L));
			default:
				return true;
		}
	}
	/**
	 * Writes the header, the index table, and their checksum, then compresses all added files into the target file.
	 */
	public void finalize(){
		CRC32 chkSmCalc = CRC32();
		file.rawWrite(SIGNATURE);
		chkSmCalc.put(reinterpretCast!ubyte(SIGNATURE));
		file.rawWrite([header]);
		chkSmCalc.put(reinterpretCast!ubyte(header));
		if(!header.compIndex && !header.compExtField){
			if(extField.length){
				file.rawWrite(extField);
				chkSmCalc.put(extField);
			}
			foreach(n, i; indexes){
				file.rawWrite([i]);
				chkSmCalc.put(reinterpretCast!ubyte(i));
				if(indexExtFields.get(cast(uint)n, null) !is null){
					file.rawWrite(indexExtFields[cast(uint)n]);
					chkSmCalc.put(indexExtFields[cast(uint)n]);
				}
			}
			const ubyte[4] checksum = chkSmCalc.finish;
			file.rawWrite(checksum);
		}else
			throw new Exception("Feature not yet implemented");
		//write each file in order of access
		foreach(n, i; indexes){
			this.compress(paths[n], i);	//compress from the original source path, not the stored name
		}
		switch (header.compMethod) {
			case CompressionMethod.zstandard:
				size_t remaining;
				do {
					remaining = ZSTD_compressStream2(cast(ZSTD_CStream*)compStream, &outBuff, &inBuff, ZSTD_EndDirective.ZSTD_e_end);
					if(outBuff.pos)
						file.rawWrite(outBuff.dst[0..outBuff.pos]);
					outBuff.pos = 0;
				} while (remaining);
				break;
			case CompressionMethod.deflate:
				int result;
				//finish compression and flush whatever is remaining in the buffers
				do {
					result = zlib.deflate(&deflateStream, zlib.Z_FINISH);
					if (!deflateStream.avail_out) {	//write to disk if output buffer is full
						file.rawWrite(compBuf);
						deflateStream.avail_out = cast(uint)compBuf.length;
						deflateStream.next_out = compBuf.ptr;
					}
				} while (result != zlib.Z_STREAM_END);
				if (deflateStream.avail_out != compBuf.length) {
					file.rawWrite(compBuf[0..$-deflateStream.avail_out]);
					deflateStream.avail_out = cast(uint)compBuf.length;
					deflateStream.next_out = compBuf.ptr;
				}
				break;
			default:
				break;
		}
	}
	/**
	 * Compresses a single file into the stream.
	 */
	protected void compress(string source, Index index){
		File src = File(source, "rb");
		switch(header.compMethod){
			case CompressionMethod.uncompressed://in this case, we just want to copy the raw data into the file
				ubyte[] buffer;
				buffer.length = readBufferSize;
				do{
					buffer = src.rawRead(buffer);
					if(buffer.length)
						file.rawWrite(buffer);
				}while(buffer.length == readBufferSize);
				break;
			case CompressionMethod.zstandard:
				size_t compSize;
				readBuf.length = readBufferSize;
				do {
					readBuf = src.rawRead(readBuf);
					inBuff.src = readBuf.ptr;
					inBuff.pos = 0;
					inBuff.size = readBuf.length;
					while (inBuff.pos < inBuff.size) {
						compSize = ZSTD_compressStream2(cast(ZSTD_CStream*)compStream, &outBuff, &inBuff, ZSTD_EndDirective.ZSTD_e_continue);
						if (ZSTD_isError(compSize)) {
							throw new CompressionException(cast(string)(fromStringz(ZSTD_getErrorName(compSize))));
						}
						if(outBuff.pos)
							file.rawWrite(outBuff.dst[0..outBuff.pos]);
						outBuff.pos = 0;
					}
				} while(readBuf.length == readBufferSize);
				//Flush to disk
				do {
					compSize = ZSTD_compressStream2(cast(ZSTD_CStream*)compStream, &outBuff, &inBuff, ZSTD_EndDirective.ZSTD_e_flush);
					if(ZSTD_isError(compSize))
						throw new CompressionException(cast(string)fromStringz(ZSTD_getErrorName(compSize)));
					if(outBuff.pos)
						file.rawWrite(outBuff.dst[0..outBuff.pos]);
					outBuff.pos = 0;
				} while(compSize);
				break;
			case CompressionMethod.deflate:
				readBuf.length = readBufferSize;
				do {
					readBuf = src.rawRead(readBuf);
					deflateStream.avail_in = cast(uint)readBuf.length;
					deflateStream.next_in = readBuf.ptr;
					do {
						int result;
						if(readBuf.length == readBufferSize)
							result = zlib.deflate(&deflateStream, zlib.Z_FULL_FLUSH);
						else
							result = zlib.deflate(&deflateStream, zlib.Z_SYNC_FLUSH);
						if (result < 0)
							throw new CompressionException(cast(string)(fromStringz(deflateStream.msg)));
						if (!deflateStream.avail_out) {	//write to disk if output buffer is full
							file.rawWrite(compBuf);
							deflateStream.avail_out = cast(uint)compBuf.length;
							deflateStream.next_out = compBuf.ptr;
						}
					} while (deflateStream.avail_in);
				} while (readBuf.length == readBufferSize);
				break;
			default:
				throw new Exception("Unknown compression method");
		}
	}
	/**
	 * Decompresses the given amount of data from the file, starting at the current position.
	 */
	protected ubyte[] decompressFromFile (const size_t amount) {
		ubyte[] output;
		output.length = amount;
		switch (header.compMethod) {
			case CompressionMethod.uncompressed://in this case, we just want to read regular data from the file
				output = file.rawRead(output);
				if(output.length != amount)
					throw new Exception("EOF reached earlier than expected from header/indexing");
				break;
			case CompressionMethod.zstandard:
				//The output buffer is set to the exact size the file needs, to avoid issues from overlapping decompression
				ZSTD_outBuffer localOutBuf = ZSTD_outBuffer(output.ptr, output.length, 0);
				do {
					if (inBuff.size == inBuff.pos || !compPos) {
						inBuff.pos = 0;
						readBuf = file.rawRead(readBuf);
						inBuff.src = readBuf.ptr;
						inBuff.size = readBuf.length;
					}
					const size_t result = ZSTD_decompressStream(cast(ZSTD_DStream*)compStream, &localOutBuf, &inBuff);
					if(ZSTD_isError(result))
						throw new CompressionException(cast(string)(fromStringz(ZSTD_getErrorName(result))));
				} while (localOutBuf.size > localOutBuf.pos);
				break;
			case CompressionMethod.deflate:
				deflateStream.next_out = output.ptr;
				deflateStream.avail_out = cast(uint)output.length;
				int result;
				do {
					if (!deflateStream.avail_in || !compPos) {
						readBuf = file.rawRead(readBuf);
						deflateStream.next_in = readBuf.ptr;
						deflateStream.avail_in = cast(uint)readBuf.length;
					}
					result = zlib.inflate(&deflateStream, zlib.Z_FULL_FLUSH);
					//Z_BUF_ERROR only means more input is needed; the loop refills the input buffer above
					if (result < 0 && result != zlib.Z_BUF_ERROR)
						throw new CompressionException(cast(string)(fromStringz(deflateStream.msg)));
				} while (deflateStream.avail_out);
				break;
			default:
				throw new Exception("Unknown compression method");
		}
		compPos += amount;
		readBuf.length = readBufferSize;
		return output;
	}

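	/**
	 * Loads the zstandard shared library through BindBC using its default name for the current platform.
	 * Must be called before working with zstandard-compressed archives; throws if the library cannot be loaded.
	 */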
	public static void loadZSTD () {
		import bindbc.zstandard.dynload;
		import bindbc.zstandard.config;
		ZSTDSupport result = loadZstandard();
		if (result == ZSTDSupport.noLibrary || result == ZSTDSupport.badLibrary)
			throw new Exception("ZSTD not found!");
	}
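	///Ditto, but loads the zstandard library from the given path or library name.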
	public static void loadZSTD (string lib) {
		import bindbc.zstandard.dynload;
		import bindbc.zstandard.config;
		ZSTDSupport result = loadZstandard(toStringz(lib));
		if (result == ZSTDSupport.noLibrary || result == ZSTDSupport.badLibrary)
			throw new Exception("ZSTD not found!");
	}
}

unittest{
	DataPak.Index index;
	index.filename = "something";
	writeln(index.field);
	assert(index.filename == "something");
}
/**
 * Default extension for adding general support for using DataPak as a regular file archival tool.
 * This does not contain user-privilege settings; those will be relegated to another struct.
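 *
 * A sketch of attaching one to a file entry through `DataPak.addFile` (`pak` is a DataPak opened for writing; how the
 * path is split between the index's filename and nameExt is up to the application):
 * ---
 * DataPak_OSExt ext;
 * ext.name = "sprites/player.png";
 * ubyte[] raw = (cast(ubyte*)&ext)[0 .. DataPak_OSExt.sizeof].dup;
 * pak.addFile("sprites/player.png", "player.png", raw);
 * ---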
 */
struct DataPak_OSExt {
align(1):
	char[6]		id = "OSExt ";			///Identifies that this field is a DataPak_OSExt struct
	ushort		size = DataPak_OSExt.sizeof;	///Size of this field
	char[200]	nameExt;				///Stores filename extension + relative path
	ulong		creationDate;			///Creation date in 64 bit POSIX time format
	ulong		modifyDate;				///Modification date in 64 bit POSIX time format
	ulong		field;					///Unused by default, can store attributes if needed
	///Sets the name extension of the file
	public string name(string val) @safe @property @nogc nothrow pure{
		for (size_t i ; i < nameExt.length ; i++)
			nameExt[i] = 0xFF;
		foreach (i , c ; val){
			if (i >= nameExt.length) break;	//do not write past the end of the field
			nameExt[i] = c;
		}
		return val;
	}
	///Gets the name extension of the file
	public string name() @safe @property nothrow pure{
		string result;
		size_t pos;
		while(nameExt.length > pos && nameExt[pos] != 0xFF){
			result ~= nameExt[pos];
			pos++;
		}
		return result;
	}
}
/**
 * Default extension for adding support for compression algorithms that support random access.
 */
struct DataPak_RandomAccessExt{
align(1):
	char[6]		id = "RandAc";			///Identifies that this field is a DataPak_RandomAccessExt struct
	ushort		size = DataPak_RandomAccessExt.sizeof;	///Size of this field
	ulong		position;				///Position of file
	union{
		ulong[2]	field64;
		uint[4]		field32;
		ushort[8]	field16;
		ubyte[16]	field8;
	}
}
/**
 * Reinterprets an array as the requested type.
 */
package T[] reinterpretCastA(T,U)(U[] input) pure @trusted{
	T[] _reinterpretCastA() pure @system{
		return cast(T[])(cast(void[])input);
	}
	if ((U.sizeof * input.length) % T.sizeof == 0)
		return _reinterpretCastA();
	else
		throw new Exception("Reinterpretation error!");
}
/**
 * Reinterprets a single value as an array of the requested type.
 */
package T[] reinterpretCast(T,U)(U input) pure @trusted{
	T[] _reinterpretCast() pure @system{
		return cast(T[])(cast(void[])[input]);
	}
	if (U.sizeof % T.sizeof == 0)
		return _reinterpretCast();
	else
		throw new Exception("Reinterpretation error!");
}
/**
 * Gets a single value of the requested type from an array (the array's length must equal T.sizeof).
 */
package T reinterpretGet(T,U)(U[] input) pure @trusted{
	T _reinterpretGet() pure @system{
		return *(cast(T*)(cast(void*)input.ptr));
	}
	if (input.length == T.sizeof)
		return _reinterpretGet();
	else
		throw new Exception("Reinterpretation error!");
}
/**
 * Thrown on checksum errors
 */
public class BadChecksumException : Exception{
	@nogc @safe pure nothrow this(string msg, string file = __FILE__, size_t line = __LINE__, Throwable nextInChain = null)
    {
        super(msg, file, line, nextInChain);
    }

    @nogc @safe pure nothrow this(string msg, Throwable nextInChain, string file = __FILE__, size_t line = __LINE__)
    {
        super(msg, file, line, nextInChain);
    }
}
/**
 * Thrown on compression errors
 */
public class CompressionException : Exception{
	@nogc @safe pure nothrow this(string msg, string file = __FILE__, size_t line = __LINE__, Throwable nextInChain = null)
    {
        super(msg, file, line, nextInChain);
    }

    @nogc @safe pure nothrow this(string msg, Throwable nextInChain, string file = __FILE__, size_t line = __LINE__)
    {
        super(msg, file, line, nextInChain);
    }
}