1 module datapak;
2 
3 public import vfile;
4 import bindbc.zstandard.zstd;
5 import etc.c.zlib;
6 import std.digest;
7 import std.digest.murmurhash;
8 import std.digest.md;
9 import std.digest.crc;
10 import std.bitmanip;
11 import std.stdio;
import std.string : toStringz, fromStringz;
13 
14 /**
 * DataPak (*.dpk) is mainly intended as a compression method for application assets. Technically it can store folder info,
 * but the filename field has a fixed, relatively short length to save some extra space. There's some support for expanding the path, but
 * it is currently unimplemented.
18  * 
19  * General layout of a file:
20  * <ul>
21  * <li>DataPak signature</li>
22  * <li>Header</li>
 * <li>Extension area. Ignored by default; handling must be implemented by whoever wants to use it. Can be compressed alongside the main
 * data to save some space (not recommended if it's needed to set up decompression, e.g. dictionaries), but only if the index field is also compressed.</li>
 * <li>Array of DataPak indexes. Each entry can have its own extension. Can be compressed alongside the main data to save some space.</li>
 * <li>CRC32 checksum, placed either at the beginning of the compressed block or at the end of the file information description table.</li>
27  * <li>Data</li>
28  * </ul>
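 *
 * A minimal reading sketch (the archive name is a placeholder; loading the Zstandard library is only
 * needed when BindBC-Zstandard is used as a dynamic binding):
 * ---
 * DataPak.loadZSTD();                      //load the Zstandard shared library first
 * DataPak pak = new DataPak("assets.dpk");
 * DataPak.Index idx = pak.getNextIndex();
 * while(idx.filename.length){
 *     ubyte[] data = pak.getNextAsArray(); //decompresses the next file in index order
 *     //...use `data` and `idx.filename` here...
 *     idx = pak.getNextIndex();
 * }
 * ---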
29  */
30 public class DataPak{
31 	///Every DataPak file begins with this.
32 	///The dot will be replaced with numbers if I ever decide to make any upgrades to the format
33 	static enum char[8] SIGNATURE = "DataPak.";		
34 	/**
35 	 * Default compression methods for the file.
36 	 */
37 	enum CompressionMethod : char[8]{
38 		uncompressed		=	"UNCMPRSD",
39 		deflate				=	"ZLIB    ",			
40 		zstandard			=	"ZSTD    ",			
41 		//the following algorithms are not yet implemented, but probably will be
42 		lz4					=	"LZ4     "
43 	}
44 	/**
45 	 * Selects between checksums.
	 * Please note that the longer the checksum, the fewer bytes are left for the filename (e.g. an MD5 checksum leaves 112 - 16 = 96 bytes for the name).
47 	 * Values between 32-63 can be used for custom implementations.
48 	 */
49 	public enum ChecksumType : ubyte{
50 		none				=	0,
51 		ripeMD				=	1,
52 		murmurhash32_32		=	2,
53 		murmurhash128_32	=	3,
54 		murmurhash128_64	=	4,
55 		sha224				=	5,
56 		sha256				=	6,
57 		sha384				=	7,
58 		sha512				=	8,
59 		sha512_224			=	9,
60 		sha512_256			=	10,
61 		md5					=	11,
62 		crc32				=	12,
63 		crc64				=	13,///ISO only!
64 
65 	}
66 	/**
67 	 * Stores the length of each checksum result
68 	 */
	package static immutable ubyte[14] CHECKSUM_LENGTH = [0, 20, 4, 16, 16, 28, 32, 48, 64, 28, 32, 16, 4, 8];
70 	/**
	 * Stores important information about the file.
72 	 */
73 	public struct Header{
74 	align(1):
75 		ulong		indexSize;		///Size of the index field in bytes, including the extension fields
76 		ulong		decompSize;		///Total decompressed size
77 		char[8]		compMethod;		///Compression method stored as a string
78 		uint		extFieldSize;	///Extension area size
79 		uint		numOfIndexes;	///Total number of file indexes
80 		mixin(bitfields!(
81 			bool, "compIndex", 		1,	///If high, the idexes will be compressed
82 			bool, "compExtField", 	1,	///If high, the extension field will be compressed
83 			ubyte, "checksumType",	6,	///Type of checksum for the files
84 			ubyte, "compLevel",		6,	///Compression level if needed
85 			uint, "",				18,	///Reserved for future use
86 		));
87 		//uint		padding;
88 	}
89 	/**
90 	 * Index representing data for a file.
91 	 */
92 	public struct Index{
93 	align(1):
94 		ulong 		offset;			///Points to where the file begins in the decompressed stream
95 		ushort		extFieldSize;	///Extension area size for the index
96 		ushort		sizeH;			///The upper 16 bits of the file's size
97 		uint		sizeL;			///The lower 32 bits of the file's size
98 		char[112]	field;			///Name of the file terminated with a null character + checksum in the end
		///Returns the filename as a string
		public @property string filename() @safe pure nothrow{
			string result;
			size_t pos;
			//unused bytes of the field are left at char.init (0xFF); stop at that or at a null terminator
			while(field.length > pos && field[pos] != 0xFF && field[pos] != '\0'){
				result ~= field[pos];
				pos++;
			}
			return result;
		}
		///Sets the filename
		public @property string filename(string val) @safe @nogc pure nothrow{
			//copy at most field.length characters so an overlong name cannot overrun the field
			foreach(i , c ; val){
				if(i >= field.length) break;
				field[i] = c;
			}
			return val;
		}
119 		///Returns the checksum/hash
120 		public ubyte[N] checksum(int N = 16)() @safe @nogc pure nothrow{
121 			ubyte[N] result;
122 			for(int i ; i < N ; i++){
123 				result[i] = field[field.length - N + i];
124 			}
125 			return result;
126 		}
127 		///Sets the checksum/hash
128 		public ubyte[N] checksum(int N = 16)(ubyte[N] val) @safe @nogc pure nothrow{
129 			for(int i ; i < N ; i++){
130 				field[field.length - N + i] = val[i];
131 			}
132 			return val;
133 		}
134 	}
135 	protected Header header;
136 	protected File file;
137 	protected Index[] indexes;
138 	protected string[] paths;			///Only used during compression
139 	protected uint nextIndex;
140 	protected ubyte[] extField;
141 	protected ubyte[][uint] indexExtFields;
142 	protected bool readOnly, createNew;
143 	protected void* compStream;
144 	protected ubyte[] readBuf, compBuf;
145 	protected ZSTD_inBuffer inBuff;
146 	protected ZSTD_outBuffer outBuff;
147 	protected size_t prevOutPos;		///0 if no data left from previous decompression
148 	protected ulong compPos, compPos0;	///Current position of data; position of all currently decompressed data
149 
	public pure void delegate(size_t pos) progress;	///Called to inform the host of decompression progress (will be replaced with something more useful in the future)
151 
	public static bool enableThrowOnChecksumError = true;	///If false, checksum errors won't throw; the data is returned regardless
153 	public static size_t readBufferSize = 32 * 1024; ///Sets the read buffer size of all instances (default is 32kB)
154 	/**
155 	 * Loads a DataPak file from disk for reading.
156 	 */
157 	public this(string filename){
158 		this(File(filename));
159 		readOnly = true;
160 	}
161 	///Ditto
162 	public this(File f){
163 		CRC32 chkSmCalc = CRC32();
164 		ubyte[4] crc;
165 		char[] signature;
166 		signature.length = SIGNATURE.length;
167 		signature = f.rawRead(signature);
168 		//check for correct file signature
169 		//if(signature != SIGNATURE)
170 		foreach(i ,c ; signature)
171 			if(SIGNATURE[i] != c)
172 				throw new Exception("File isn't DataPak file");
173 		chkSmCalc.put(reinterpretCastA!ubyte(signature));
174 		readBuf.length = Header.sizeof;
175 		readBuf = f.rawRead(readBuf);
176 		header = reinterpretGet!Header(readBuf);
177 		//ubyte[4] chkSm = header.checksum;
178 		//header.checksum = [0x0, 0x0, 0x0, 0x0];
179 		//readBuf.length -= 4;
180 		//readBuf.length += 4;
181 		file = f;
182 		chkSmCalc.put(readBuf);
183 		initDecomp;
184 		if(header.extFieldSize){
185 			if(!header.compExtField){
186 				extField.length = header.extFieldSize;
187 				//f.rawRead(extField);
188 				extField = f.rawRead(extField);
189 			}else{
190 				
191 				f.rawRead(crc);
192 				extField = decompressFromFile(header.extFieldSize);
193 			}
194 			chkSmCalc.put(extField);
195 		}
196 		if(!header.compIndex){
197 			indexes.length = header.numOfIndexes;
198 			readBuf.length = Index.sizeof;
199 			for(int i; i < indexes.length; i++){
200 				//fread(readBuf.ptr, readBuf.length, 1, f);
201 				readBuf = f.rawRead(readBuf);
202 				indexes[i] = reinterpretGet!Index(readBuf);
203 				chkSmCalc.put(readBuf);
				if(indexes[i].extFieldSize){
					readBuf.length = indexes[i].extFieldSize;
					readBuf = f.rawRead(readBuf);
					chkSmCalc.put(readBuf);
					indexExtFields[i] = readBuf.dup;
					readBuf.length = Index.sizeof;
				}
211 			}
212 			f.rawRead(crc);
213 		}else{
214 			if(!header.compExtField)
215 				f.rawRead(crc);
			ubyte[] temp = decompressFromFile(header.indexSize);
			ubyte* tempPtr = temp.ptr;
			chkSmCalc.put(temp);
			indexes.length = header.numOfIndexes;
			for(int i; i < indexes.length; i++){
220 				indexes[i] = *(cast(Index*)(cast(void*)tempPtr));
221 				tempPtr += Index.sizeof;
222 				if(indexes[i].extFieldSize){
223 					indexExtFields[i] = tempPtr[0..indexes[i].extFieldSize].dup;
224 					tempPtr += indexes[i].extFieldSize;
225 				}
226 			}
227 		}
228 
229 		const ubyte[4] checksum = chkSmCalc.finish();
230 		if(crc != checksum && enableThrowOnChecksumError){
231 			throw new BadChecksumException("CRC32 error in header/index");
232 		}
233 		
234 	}
235 	/**
236 	 * Creates a DataPak file from scratch.
237 	 */
	this(Header header, string targetName, ubyte[] extField = []){
		file = File(targetName, "wb");
		this.header = header;
		this.extField = extField;
		createNew = true;
		initComp;
	}
244 	~this(){
245 		//fclose(file);
246 		//deinitialize compression
247 		switch(header.compMethod){
248 			case CompressionMethod.zstandard:
249 				if(createNew){
250 					ZSTD_freeCStream(cast(ZSTD_CStream*)compStream);
251 				}else{
252 					ZSTD_freeDStream(cast(ZSTD_DStream*)compStream);
253 				}
254 				break;
255 			default:	
256 				break;
257 		}
258 	}
259 	/**
260 	 * Adds a file to be compressed later.
261 	 * Returns the created index for it.
262 	 */
263 	public Index addFile(string filename, string newName = null, ubyte[] indexExtField = []){
264 		Index result;
265 		if(!newName.length){
266 			newName = filename;
267 		}
268 		result.filename = newName;
269 
270 		ubyte[] buffer;
271 		buffer.length = 32*1024;
272 		File f = File(filename);
273 		result.sizeL = cast(uint)(f.size);
274 		result.sizeH = cast(ushort)(f.size>>32);
275 		//Calculate checksums if needed
276 		size_t remain = cast(size_t)f.size;
277 		switch(header.checksumType){
278 			case ChecksumType.murmurhash32_32:
279 				MurmurHash3!(32,32) checksum = MurmurHash3!(32, 32)(0x66_69_6c_65);
280 				while(remain > buffer.length){
281 					f.rawRead(buffer);
282 					checksum.put(buffer);
283 					remain -= remain >= buffer.length ? buffer.length : remain;
284 				}
285 				if(remain){
286 					buffer.length = remain;
287 					f.rawRead(buffer);
288 					checksum.put(buffer);
289 				}
290 				checksum.finalize();
291 				const ubyte[4] chksRes = checksum.getBytes;
292 				result.checksum(chksRes);
293 				break;
294 			case ChecksumType.murmurhash128_32:
295 				MurmurHash3!(128,32) checksum = MurmurHash3!(128, 32)(0x66_69_6c_65);
296 				while(remain > buffer.length){
297 					f.rawRead(buffer);
298 					checksum.put(buffer);
299 					remain -= remain >= buffer.length ? buffer.length : remain;
300 				}
301 				if(remain){
302 					buffer.length = remain;
303 					f.rawRead(buffer);
304 					checksum.put(buffer);
305 				}
306 				checksum.finalize();
307 				const ubyte[16] chksRes = checksum.getBytes;
308 				result.checksum(chksRes);
309 				break;
310 			case ChecksumType.murmurhash128_64:
311 				MurmurHash3!(128,64) checksum = MurmurHash3!(128, 64)(0x66_69_6c_65);
312 				while(remain > buffer.length){
313 					f.rawRead(buffer);
314 					checksum.put(buffer);
315 					remain -= remain >= buffer.length ? buffer.length : remain;
316 				}
317 				if(remain){
318 					buffer.length = remain;
319 					f.rawRead(buffer);
320 					checksum.put(buffer);
321 				}
322 				checksum.finalize();
323 				const ubyte[16] chksRes = checksum.getBytes;
324 				result.checksum(chksRes);
325 				break;
326 			default:
327 				break;
328 		}
329 		result.offset = compPos;
330 		compPos += f.size;
331 		result.extFieldSize = cast(ushort)indexExtField.length;
		indexes ~= result;
		paths ~= filename;
333 		if(indexExtField.length)
334 			indexExtFields[cast(uint)(indexes.length - 1)] = indexExtField;
335 		header.decompSize += f.size;
336 		header.indexSize += Index.sizeof + indexExtField.length;
337 		header.numOfIndexes = cast(uint)indexes.length;
338 		return result;
339 	}
340 	/**
341 	 * Initializes compression.
342 	 */
343 	protected void initComp(){
344 		if(compStream) return;
345 		switch(header.compMethod){
346 			case CompressionMethod.uncompressed:
347 				break;
348 			case CompressionMethod.zstandard:
349 				compStream = ZSTD_createCStream();
350 				//const size_t result = ZSTD_initCStream(cast(ZSTD_CStream*)compStream, header.compLevel);
351 				//readBuf.length = result;
352 				ZSTD_CCtx_reset(cast(ZSTD_CStream*)compStream, ZSTD_ResetDirective.ZSTD_reset_session_only);
353 				ZSTD_CCtx_setParameter(cast(ZSTD_CStream*)compStream, ZSTD_cParameter.ZSTD_c_compressionLevel, header.compLevel);
354 				inBuff = ZSTD_inBuffer(readBuf.ptr, readBuf.length, 0);
355 				//compBuf.length = ZSTD_CStreamOutSize();
356 				compBuf.length = readBufferSize;
357 				outBuff = ZSTD_outBuffer(compBuf.ptr, compBuf.length, 0);
358 				break;
359 			default:
360 				throw new Exception("Unknown compression method");
361 		}
362 	}
363 	/**
364 	 * Initializes decompression.
365 	 */
366 	protected void initDecomp(){
367 		if(compStream) return;
368 		switch(header.compMethod){
369 			case CompressionMethod.uncompressed:
370 				break;
371 			case CompressionMethod.zstandard:
372 				compStream = ZSTD_createDStream();
373 				ZSTD_initDStream(cast(ZSTD_DStream*)compStream);
374 				//writeln(result);
375 				readBuf.length = readBufferSize;
376 				inBuff = ZSTD_inBuffer(readBuf.ptr, readBuf.length, 0);
377 				//compBuf.length = ZSTD_DStreamOutSize();
378 				//outBuff = ZSTD_outBuffer(compBuf.ptr, compBuf.length, 0);
379 				break;
380 			default:
381 				throw new Exception("Unknown compression method");
382 		}
383 	}
384 	/**
385 	 * Returns a given index.
386 	 */
387 	public Index getIndex(uint i){
388 		if(i >= indexes.length)
389 			return Index.init;
390 		else
391 			return indexes[i];
392 	}
393 	/**
394 	 * Returns the index of next file.
395 	 */
396 	public Index getNextIndex(){
397 		if(nextIndex >= indexes.length)
398 			return Index.init;
399 		else
400 			return indexes[nextIndex];
401 	}
402 	/**
403 	 * Returns the next file as a VFile.
404 	 */
405 	/+public VFile getNextAsVFile(){
406 		VFile result = VFile.__ctor!ubyte(getNextAsArray);
407 		return result;
408 	}+/
409 	/**
410 	 * Returns the next file as an ubyte[] array.
411 	 */
412 	public ubyte[] getNextAsArray(){
413 		if(nextIndex >= indexes.length)
414 			return [];
415 		static if(size_t.sizeof == 4)
416 			ubyte[] result = decompressFromFile(indexes[nextIndex].sizeL);
417 		else{
418 			ubyte[] result = decompressFromFile(cast(ulong)indexes[nextIndex].sizeL | cast(ulong)indexes[nextIndex].sizeH<<32);
419 		}
420 
421 		nextIndex++;
422 		return result;
423 	}
424 	/**
425 	 * Checks the integrity of a file.
426 	 */
427 	protected bool checkFile(ubyte[] data, ubyte[] checksum){
428 		switch(header.checksumType){
429 			case ChecksumType.murmurhash32_32:
430 				MurmurHash3!(32,32) chkCalc = MurmurHash3!(32, 32)(0x66_69_6c_65);
431 				chkCalc.put(data);
432 				const ubyte[4] result = chkCalc.finish;
433 				if(result != checksum)
434 					return false;
435 				return true;
436 			case ChecksumType.murmurhash128_32:
437 				MurmurHash3!(128,32) chkCalc = MurmurHash3!(128, 32)(0x66_69_6c_65);
438 				chkCalc.put(data);
439 				const ubyte[16] result = chkCalc.finish;
440 				if(result != checksum)
441 					return false;
442 				return true;
443 			case ChecksumType.murmurhash128_64:
444 				MurmurHash3!(128,64) chkCalc = MurmurHash3!(128, 64)(0x66_69_6c_65);
445 				chkCalc.put(data);
446 				const ubyte[16] result = chkCalc.finish;
447 				if(result != checksum)
448 					return false;
449 				return true;
450 			default:
451 				return true;
452 		}
453 	}
454 	/**
455 	 * Begins compression into file.
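	 *
	 * A minimal writing sketch (the file names and the chosen compression/checksum settings are only
	 * placeholders):
	 * ---
	 * DataPak.Header hdr;
	 * hdr.compMethod = DataPak.CompressionMethod.zstandard;
	 * hdr.checksumType = DataPak.ChecksumType.murmurhash128_64;
	 * hdr.compLevel = 9;
	 * DataPak pak = new DataPak(hdr, "assets.dpk");
	 * pak.addFile("image.png");            //registers the file and computes its checksum
	 * pak.addFile("music.ogg", "bgm.ogg"); //stores the file under a different name
	 * pak.finalize();                      //writes the header, the index, then the compressed data
	 * ---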
456 	 */
457 	public void finalize(){
458 		CRC32 chkSmCalc = CRC32();
459 		file.rawWrite(SIGNATURE);
460 		chkSmCalc.put(reinterpretCast!ubyte(SIGNATURE));
461 		file.rawWrite([header]);
462 		chkSmCalc.put(reinterpretCast!ubyte(header));
		if(!header.compIndex && !header.compExtField){
			if(extField.length){
				file.rawWrite(extField);
				chkSmCalc.put(extField);
			}
467 			foreach(n, i; indexes){
468 				file.rawWrite([i]);
469 				chkSmCalc.put(reinterpretCast!ubyte(i));
470 				if(indexExtFields.get(cast(uint)n, null) !is null){
471 					file.rawWrite(indexExtFields[cast(uint)n]);
472 					chkSmCalc.put(indexExtFields[cast(uint)n]);
473 				}
474 			}
475 			const ubyte[4] checksum = chkSmCalc.finish;
476 			file.rawWrite(checksum);
477 		}else
478 			throw new Exception("Feature not yet implemented");
		//write each file in order of access
		foreach(n, i; indexes){
			this.compress(paths[n], i);
		}
483 		switch(header.compMethod){
			case CompressionMethod.zstandard:
				size_t remaining;
				do{
					remaining = ZSTD_compressStream2(cast(ZSTD_CStream*)compStream, &outBuff, &inBuff, ZSTD_EndDirective.ZSTD_e_end);
					if(ZSTD_isError(remaining))
						throw new CompressionException(cast(string)fromStringz(ZSTD_getErrorName(remaining)));
					if(outBuff.pos)
						file.rawWrite(outBuff.dst[0..outBuff.pos]);
					outBuff.pos = 0;
				}while(remaining);
492 				break;
493 			default:
494 				break;
495 		}
496 	}
497 	/**
498 	 * Compresses a single file into the stream.
499 	 */
500 	protected void compress(string source, Index index){
501 		File src = File(source, "rb");
502 		switch(header.compMethod){
503 			case CompressionMethod.uncompressed://in this case, we just want to copy the raw data into the file
504 				ubyte[] buffer;
505 				buffer.length = readBufferSize;
506 				/*while(src.tell + buffer.length >= src.size){
507 					src.rawRead(buffer);
508 					file.rawWrite(buffer);
509 				}*/
510 				do{
511 					buffer = src.rawRead(buffer);
512 					if(buffer.length)
513 						file.rawWrite(buffer);
514 				}while(buffer.length == readBufferSize);
515 				/*if(src.size - src.tell){
516 					buffer.length = cast(size_t)(src.size - src.tell);
517 					src.rawRead(buffer);
518 					file.rawWrite(buffer);
519 				}*/
520 				break;
521 			case CompressionMethod.zstandard:
522 				size_t compSize;
523 				readBuf.length = readBufferSize;
524 				do{
525 					readBuf = src.rawRead(readBuf);
526 					inBuff.src = readBuf.ptr;
527 					inBuff.pos = 0;
528 					inBuff.size = readBuf.length;
529 					while(inBuff.pos < inBuff.size){
530 						compSize = ZSTD_compressStream2(cast(ZSTD_CStream*)compStream, &outBuff, &inBuff, ZSTD_EndDirective.ZSTD_e_continue);
531 						if(ZSTD_isError(compSize)){
532 							throw new CompressionException(cast(string)(fromStringz(ZSTD_getErrorName(compSize))));
533 						}
534 						//writeln(source, ": ", compSize,"; ;",outBuff.pos);
535 						//fwrite(outBuff.dst, compSize, 1, file);
536 						if(outBuff.pos)
537 							file.rawWrite(outBuff.dst[0..outBuff.pos]);
538 						outBuff.pos = 0;
539 					}
540 					inBuff.size = readBuf.length;
541 				}while(readBuf.length == readBufferSize);
542 				//Flush to disk
543 				do{
544 					compSize = ZSTD_compressStream2(cast(ZSTD_CStream*)compStream, &outBuff, &inBuff, ZSTD_EndDirective.ZSTD_e_flush);
545 					//writeln(source, ": ", compSize,"; ",outBuff.pos);
546 					if(ZSTD_isError(compSize))
547 						throw new CompressionException(cast(string)fromStringz(ZSTD_getErrorName(compSize)));
548 					if(outBuff.pos)
549 						file.rawWrite(outBuff.dst[0..outBuff.pos]);
550 					outBuff.pos = 0;
551 				}while(compSize);
552 				
553 				outBuff.pos = 0;
554 				
555 				break;
			case CompressionMethod.deflate:
				//deflate (zlib) compression is not yet implemented
				break;
558 			default:
559 				//fclose(src);
560 				throw new Exception("Unknown compression method");
561 		}
562 		//fclose(src);
563 	}
564 	/**
565 	 * Decompresses a given amount from the file from the current position.
566 	 */
567 	protected ubyte[] decompressFromFile(const size_t amount){
568 		ubyte[] output;
569 		output.length = amount;
570 		//size_t curAmount;
571 		//writeln("compPos: ",compPos);
572 		switch(header.compMethod){
573 			case CompressionMethod.uncompressed://in this case, we just want to read regular data from file
574 				
575 				//fread(output.ptr, output.length, 1, file);
576 				output = file.rawRead(output);
577 				if(output.length != amount)
578 					throw new Exception("EOF reached earlier than expected from header/indexing");
579 				break;
580 			case CompressionMethod.zstandard:
				//Set the output buffer to the exact size the file needs, so we can avoid issues from overlapping decompression
582 				//output.length = amount;
583 				ZSTD_outBuffer localOutBuf = ZSTD_outBuffer(output.ptr, output.length, 0);
584 				//size_t prevPos;
585 				do{
586 					if(inBuff.size == inBuff.pos || !compPos){
587 						inBuff.pos = 0;
588 						//fread(readBuf.ptr, readBuf.length, 1, file);
589 						readBuf = file.rawRead(readBuf);
590 						inBuff.src = readBuf.ptr;
591 						inBuff.size = readBuf.length;
592 					}
593 					//writeln("readBuf.length: ",readBuf.length);
594 					const size_t result = ZSTD_decompressStream(cast(ZSTD_DStream*)compStream, &localOutBuf, &inBuff);
595 					//writeln("inBuff.size: ",inBuff.size);
596 					if(ZSTD_isError(result)){
597 						throw new CompressionException(cast(string)(fromStringz(ZSTD_getErrorName(result))));
598 					}else{
599 						//compPos += localOutBuf.pos - prevPos;
600 						//prevPos = localOutBuf.pos;
601 					}
602 				} while (localOutBuf.size > localOutBuf.pos);
603 				break;
			case CompressionMethod.deflate:
				//deflate (zlib) decompression is not yet implemented
				break;
606 			default:
607 				throw new Exception("Unknown compression method");
608 		}
609 		compPos += amount;
610 		readBuf.length = readBufferSize;
611 		return output;
612 	}
613 
	///Loads the Zstandard shared library through BindBC using the default library name.
	public static void loadZSTD(){
615 		import bindbc.zstandard.dynload;
616 		import bindbc.zstandard.config;
617 		ZSTDSupport result = loadZstandard();
618 		if(result == ZSTDSupport.noLibrary || result == ZSTDSupport.badLibrary)
619 			throw new Exception("ZSTD not found!");
620 	}
	///Ditto, but loads the library from the given path/name.
	public static void loadZSTD(string lib){
622 		import bindbc.zstandard.dynload;
623 		import bindbc.zstandard.config;
624 		ZSTDSupport result = loadZstandard(toStringz(lib));
625 		if(result == ZSTDSupport.noLibrary || result == ZSTDSupport.badLibrary)
626 			throw new Exception("ZSTD not found!");
627 	}
628 }
629 
630 unittest{
631 	DataPak.Index index;
632 	index.filename = "something";
633 	writeln(index.field);
634 	assert(index.filename == "something");
635 }
636 /**
 * Default index extension for adding general support for using DataPak as a regular file archival tool
638  */
639 struct DataPak_OSExt{
640 align(1):
641 	char[6]		id = "OSExt ";			///Identifies that this field is a DataPak_OSExt struct
642 	ushort		size = 256;				///Size of this field
643 	char[160]	path;					///Stores the relative path of the file
644 	char[32]	ownerUserID;			///Owner's ID on POSIX systems
645 	char[32]	ownerUserGroup;			///Owner's group on POSIX systems
646 	ulong		creationDate;			///Creation date in 64 bit POSIX time format
647 	ulong		modifyDate;				///Modification date in 64 bit POSIX time format
648 	ulong		field;					///Unused by default, can store attributes if needed
649 }
650 /**
651  * Reinterprets an array as the requested type.
652  */
653 package T[] reinterpretCastA(T,U)(U[] input) pure @trusted{
654 	T[] _reinterpretCastA() pure @system{
655 		return cast(T[])(cast(void[])input);
656 	}
657 	if ((U.sizeof * input.length) % T.sizeof == 0)
658 		return _reinterpretCastA();
659 	else
660 		throw new Exception("Reinterpretation error!");
661 }
662 /**
 * Reinterprets a single value as an array of the requested type.
664  */
665 package T[] reinterpretCast(T,U)(U input) pure @trusted{
666 	T[] _reinterpretCast() pure @system{
667 		return cast(T[])(cast(void[])[input]);
668 	}
669 	if (U.sizeof % T.sizeof == 0)
670 		return _reinterpretCast();
671 	else
672 		throw new Exception("Reinterpretation error!");
673 	
674 }
675 /**
676  * Gets a certain type from an array.
677  */
678 package T reinterpretGet(T,U)(U[] input) pure @trusted{
679 	T _reinterpretGet() pure @system{
680 		return *(cast(T*)(cast(void*)input.ptr));
681 	}
682 	if (input.length == T.sizeof)
683 		return _reinterpretGet();
684 	else
685 		throw new Exception("Reinterpretation error!");
686 }
687 /**
688  * Thrown on checksum errors
689  */
690 public class BadChecksumException : Exception{
691 	@nogc @safe pure nothrow this(string msg, string file = __FILE__, size_t line = __LINE__, Throwable nextInChain = null)
692     {
693         super(msg, file, line, nextInChain);
694     }
695 
696     @nogc @safe pure nothrow this(string msg, Throwable nextInChain, string file = __FILE__, size_t line = __LINE__)
697     {
698         super(msg, file, line, nextInChain);
699     }
700 }
701 /**
702  * Thrown on compression errors
703  */
704 public class CompressionException : Exception{
705 	@nogc @safe pure nothrow this(string msg, string file = __FILE__, size_t line = __LINE__, Throwable nextInChain = null)
706     {
707         super(msg, file, line, nextInChain);
708     }
709 
710     @nogc @safe pure nothrow this(string msg, Throwable nextInChain, string file = __FILE__, size_t line = __LINE__)
711     {
712         super(msg, file, line, nextInChain);
713     }
714 }