/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultDecodingContext;
import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ChecksumType;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.hbase.util.CompoundBloomFilter;
import org.apache.hadoop.io.IOUtils;

import com.google.common.base.Preconditions;
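
/**
 * Reading and writing version 2 {@link HFile} blocks. A version 2 block is
 * laid out on disk as follows:
 * <ul>
 * <li>Magic record identifying the block type (8 bytes)
 * <li>On-disk size of the block, header not included (4 bytes)
 * <li>Uncompressed size of the block, header not included (4 bytes)
 * <li>File offset of the previous block of the same type (8 bytes)
 * <li>For minor versions >= {@link #MINOR_VERSION_WITH_CHECKSUM}: the
 * checksum type (1 byte), the number of bytes covered by each checksum
 * (4 bytes), and the on-disk size of the header and data, checksums
 * excluded (4 bytes)
 * <li>Compressed data (or uncompressed data if compression is disabled)
 * <li>For minor versions >= {@link #MINOR_VERSION_WITH_CHECKSUM}: a series
 * of 4-byte checksums, one for each bytesPerChecksum bytes of header and
 * data
 * </ul>
 * The representation in the block cache is the same, except that the data
 * section is always kept uncompressed in memory.
 */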
@InterfaceAudience.Private
public class HFileBlock implements Cacheable {

  /** Minor versions starting with this one support checksums on block data. */
  static final int MINOR_VERSION_WITH_CHECKSUM = 1;

  /** Minor version of blocks written without checksum support. */
  static final int MINOR_VERSION_NO_CHECKSUM = 0;

  /**
   * After an HBase checksum failure, this many subsequent read requests are
   * verified using HDFS checksums before HBase checksum verification is
   * re-enabled.
   */
  static final int CHECKSUM_VERIFICATION_NUM_IO_THRESHOLD = 3;

  public static final boolean FILL_HEADER = true;
  public static final boolean DONT_FILL_HEADER = false;

  /**
   * Size of an encoded data block's header: the regular block header plus
   * the short that stores the data block encoding id.
   */
  public static final int ENCODED_HEADER_SIZE = HConstants.HFILEBLOCK_HEADER_SIZE
      + DataBlockEncoding.ID_SIZE;

  static final byte[] DUMMY_HEADER_NO_CHECKSUM =
      new byte[HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM];

  public static final int BYTE_BUFFER_HEAP_SIZE = (int) ClassSize.estimateBase(
      ByteBuffer.wrap(new byte[0], 0, 0).getClass(), false);

  /**
   * Space for metadata serialized along with the block when it is cached:
   * the minor version (int), the file offset (long), and the next block's
   * on-disk size with header (int).
   */
  public static final int EXTRA_SERIALIZATION_SPACE = 2 * Bytes.SIZEOF_INT
      + Bytes.SIZEOF_LONG;

  /** Each checksum value is an integer that can be stored in 4 bytes. */
  static final int CHECKSUM_SIZE = Bytes.SIZEOF_INT;

  private static final CacheableDeserializer<Cacheable> blockDeserializer =
      new CacheableDeserializer<Cacheable>() {
        @Override
        public HFileBlock deserialize(ByteBuffer buf, boolean reuse) throws IOException {
          // Temporarily exclude the extra serialization metadata while
          // copying or slicing the block's own bytes.
          buf.limit(buf.limit() - HFileBlock.EXTRA_SERIALIZATION_SPACE).rewind();
          ByteBuffer newByteBuffer;
          if (reuse) {
            newByteBuffer = buf.slice();
          } else {
            newByteBuffer = ByteBuffer.allocate(buf.limit());
            newByteBuffer.put(buf);
          }
          // Read the extra metadata that follows the block's bytes.
          buf.position(buf.limit());
          buf.limit(buf.limit() + HFileBlock.EXTRA_SERIALIZATION_SPACE);
          int minorVersion = buf.getInt();
          HFileBlock ourBuffer = new HFileBlock(newByteBuffer, minorVersion);
          ourBuffer.offset = buf.getLong();
          ourBuffer.nextBlockOnDiskSizeWithHeader = buf.getInt();
          return ourBuffer;
        }

        @Override
        public int getDeserialiserIdentifier() {
          return deserializerIdentifier;
        }

        @Override
        public HFileBlock deserialize(ByteBuffer b) throws IOException {
          return deserialize(b, false);
        }
      };

  private static final int deserializerIdentifier;
  static {
    deserializerIdentifier = CacheableDeserializerIdManager
        .registerDeserializer(blockDeserializer);
  }

  /** Type of block. Header field 0. */
  private BlockType blockType;

  /** Size on disk excluding header, including checksums. Header field 1. */
  private int onDiskSizeWithoutHeader;

  /** Size of pure data, excluding header and checksums. Header field 2. */
  private final int uncompressedSizeWithoutHeader;

  /** The offset of the previous block of the same type. Header field 3. */
  private final long prevBlockOffset;

  /** The on-disk checksum type code. Header field 4 (minor version >= 1). */
  private final byte checksumType;

  /** The number of bytes covered by each checksum. Header field 5 (minor version >= 1). */
  private final int bytesPerChecksum;

  /** Size on disk of header and data, excluding checksums. Header field 6 (minor version >= 1). */
  private final int onDiskDataSizeWithHeader;

  /** The minor version of this block. */
  private final int minorVersion;

  /** The in-memory representation of the block: header, data and checksums. */
  private ByteBuffer buf;

  /** Whether there is a memstore timestamp after every key/value. */
  private boolean includesMemstoreTS;

  /**
   * The offset of this block in the file. Populated by the reader for blocks
   * read from a file, and by the writer for blocks cached on write. Negative
   * while unset.
   */
  private long offset = -1;

  /**
   * The on-disk size of the next block, including its header, obtained by
   * peeking into the first {@link #headerSize()} bytes of the next block's
   * header, or -1 if unknown.
   */
  private int nextBlockOnDiskSizeWithHeader = -1;
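
  /**
   * Creates a new {@link HFile} block from the given fields. This constructor
   * is mostly used when the block data has already been read and
   * uncompressed, and is sitting in a byte buffer.
   *
   * @param blockType the type of this block, see {@link BlockType}
   * @param onDiskSizeWithoutHeader see {@link #onDiskSizeWithoutHeader}
   * @param uncompressedSizeWithoutHeader see {@link #uncompressedSizeWithoutHeader}
   * @param prevBlockOffset see {@link #prevBlockOffset}
   * @param buf block header followed by uncompressed data
   * @param fillHeader when true, write the given field values into the
   *          header portion of {@code buf}
   * @param offset the file offset the block was read from
   * @param includesMemstoreTS whether a memstore timestamp follows every
   *          key/value
   * @param minorVersion the minor version of this block
   * @param bytesPerChecksum the number of bytes covered by each checksum
   * @param checksumType the checksum algorithm code
   * @param onDiskDataSizeWithHeader see {@link #onDiskDataSizeWithHeader}
   */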
  HFileBlock(BlockType blockType, int onDiskSizeWithoutHeader,
      int uncompressedSizeWithoutHeader, long prevBlockOffset, ByteBuffer buf,
      boolean fillHeader, long offset, boolean includesMemstoreTS,
      int minorVersion, int bytesPerChecksum, byte checksumType,
      int onDiskDataSizeWithHeader) {
    this.blockType = blockType;
    this.onDiskSizeWithoutHeader = onDiskSizeWithoutHeader;
    this.uncompressedSizeWithoutHeader = uncompressedSizeWithoutHeader;
    this.prevBlockOffset = prevBlockOffset;
    this.buf = buf;
    if (fillHeader) {
      overwriteHeader();
    }
    this.offset = offset;
    this.includesMemstoreTS = includesMemstoreTS;
    this.minorVersion = minorVersion;
    this.bytesPerChecksum = bytesPerChecksum;
    this.checksumType = checksumType;
    this.onDiskDataSizeWithHeader = onDiskDataSizeWithHeader;
  }
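
  /**
   * Creates a block from an existing buffer that starts with the block
   * header. Rewinds and takes ownership of the buffer.
   *
   * @param b the buffer, positioned at the start of the header
   * @param minorVersion the minor version of this block
   */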
  HFileBlock(ByteBuffer b, int minorVersion) throws IOException {
    b.rewind();
    blockType = BlockType.read(b);
    onDiskSizeWithoutHeader = b.getInt();
    uncompressedSizeWithoutHeader = b.getInt();
    prevBlockOffset = b.getLong();
    this.minorVersion = minorVersion;
    if (minorVersion >= MINOR_VERSION_WITH_CHECKSUM) {
      this.checksumType = b.get();
      this.bytesPerChecksum = b.getInt();
      this.onDiskDataSizeWithHeader = b.getInt();
    } else {
      this.checksumType = ChecksumType.NULL.getCode();
      this.bytesPerChecksum = 0;
      this.onDiskDataSizeWithHeader = onDiskSizeWithoutHeader +
          HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM;
    }
    buf = b;
    buf.rewind();
  }

  public BlockType getBlockType() {
    return blockType;
  }

  /** @return the encoder id of an encoded data block, read from the buffer */
  public short getDataBlockEncodingId() {
    if (blockType != BlockType.ENCODED_DATA) {
      throw new IllegalArgumentException("Querying encoder ID of a block " +
          "of type other than " + BlockType.ENCODED_DATA + ": " + blockType);
    }
    return buf.getShort(headerSize());
  }

  /** @return the on-disk size of the block, header included */
  public int getOnDiskSizeWithHeader() {
    return onDiskSizeWithoutHeader + headerSize();
  }

  /**
   * @return the on-disk size of the data part plus checksums, header not
   *         included
   */
  public int getOnDiskSizeWithoutHeader() {
    return onDiskSizeWithoutHeader;
  }

  /** @return the uncompressed size of the data part, header not included */
  public int getUncompressedSizeWithoutHeader() {
    return uncompressedSizeWithoutHeader;
  }

  /**
   * @return the offset of the previous block of the same type in the file,
   *         or -1 if unknown
   */
  public long getPrevBlockOffset() {
    return prevBlockOffset;
  }

  /**
   * Rewinds {@code buf} and writes the first four header fields (block type,
   * on-disk size, uncompressed size, previous block offset). The checksum
   * header fields are not rewritten.
   */
  private void overwriteHeader() {
    buf.rewind();
    blockType.write(buf);
    buf.putInt(onDiskSizeWithoutHeader);
    buf.putInt(uncompressedSizeWithoutHeader);
    buf.putLong(prevBlockOffset);
  }
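
  /**
   * Returns a buffer that does not include the header and checksums. The
   * caller must not modify the returned buffer. This method is public
   * because it is used by {@link CompoundBloomFilter} to avoid object
   * creation on every Bloom filter lookup, but it must be used with caution.
   *
   * @return the buffer with header and checksums excluded
   */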
  public ByteBuffer getBufferWithoutHeader() {
    return ByteBuffer.wrap(buf.array(), buf.arrayOffset() + headerSize(),
        buf.limit() - headerSize() - totalChecksumBytes()).slice();
  }

  /**
   * Returns the buffer this block stores internally, with the header
   * included but the checksums excluded. The caller must not modify the
   * returned buffer.
   *
   * @return the buffer with header included and checksums excluded
   */
  public ByteBuffer getBufferReadOnly() {
    return ByteBuffer.wrap(buf.array(), buf.arrayOffset(),
        buf.limit() - totalChecksumBytes()).slice();
  }

  /**
   * Returns the buffer this block stores internally, with header and
   * checksums included. The caller must not modify the returned buffer.
   * Used, for example, when caching blocks in {@link BucketCache}.
   *
   * @return the buffer with header and checksums included
   */
  public ByteBuffer getBufferReadOnlyWithHeader() {
    return ByteBuffer.wrap(buf.array(), buf.arrayOffset(), buf.limit()).slice();
  }

  /**
   * Returns a byte buffer of this block, including header and checksum data,
   * positioned at the beginning of the header. The underlying data array is
   * not copied.
   */
  ByteBuffer getBufferWithHeader() {
    ByteBuffer dupBuf = buf.duplicate();
    dupBuf.rewind();
    return dupBuf;
  }

  private void sanityCheckAssertion(long valueFromBuf, long valueFromField,
      String fieldName) throws IOException {
    if (valueFromBuf != valueFromField) {
      throw new AssertionError(fieldName + " in the buffer (" + valueFromBuf
          + ") is different from that in the field (" + valueFromField + ")");
    }
  }

  /**
   * Checks if the block is internally consistent, i.e. the first
   * {@link #headerSize()} bytes of the buffer contain a valid header that
   * matches the fields. This method is primarily for testing and debugging,
   * and is not thread-safe, because it alters the internal buffer pointer.
   */
  void sanityCheck() throws IOException {
    buf.rewind();

    {
      BlockType blockTypeFromBuf = BlockType.read(buf);
      if (blockTypeFromBuf != blockType) {
        throw new IOException("Block type stored in the buffer: " +
            blockTypeFromBuf + ", block type field: " + blockType);
      }
    }

    sanityCheckAssertion(buf.getInt(), onDiskSizeWithoutHeader,
        "onDiskSizeWithoutHeader");

    sanityCheckAssertion(buf.getInt(), uncompressedSizeWithoutHeader,
        "uncompressedSizeWithoutHeader");

    sanityCheckAssertion(buf.getLong(), prevBlockOffset, "prevBlockOffset");
    if (minorVersion >= MINOR_VERSION_WITH_CHECKSUM) {
      sanityCheckAssertion(buf.get(), checksumType, "checksumType");
      sanityCheckAssertion(buf.getInt(), bytesPerChecksum, "bytesPerChecksum");
      sanityCheckAssertion(buf.getInt(), onDiskDataSizeWithHeader,
          "onDiskDataSizeWithHeader");
    }

    int cksumBytes = totalChecksumBytes();
    int hdrSize = headerSize();
    int expectedBufLimit = uncompressedSizeWithoutHeader + hdrSize +
        cksumBytes;
    if (buf.limit() != expectedBufLimit) {
      throw new AssertionError("Expected buffer limit " + expectedBufLimit
          + ", got " + buf.limit());
    }

    // We might optionally allocate HFILEBLOCK_HEADER_SIZE more bytes to read
    // the next block's header, so there are two sensible values for the
    // buffer capacity.
    int size = uncompressedSizeWithoutHeader + hdrSize + cksumBytes;
    if (buf.capacity() != size &&
        buf.capacity() != size + hdrSize) {
      throw new AssertionError("Invalid buffer capacity: " + buf.capacity() +
          ", expected " + size + " or " + (size + hdrSize));
    }
  }

  @Override
  public String toString() {
    return "blockType="
        + blockType
        + ", onDiskSizeWithoutHeader="
        + onDiskSizeWithoutHeader
        + ", uncompressedSizeWithoutHeader="
        + uncompressedSizeWithoutHeader
        + ", prevBlockOffset="
        + prevBlockOffset
        + ", dataBeginsWith="
        + Bytes.toStringBinary(buf.array(), buf.arrayOffset() + headerSize(),
            Math.min(32, buf.limit() - headerSize()))
        + ", fileOffset=" + offset;
  }

  private void validateOnDiskSizeWithoutHeader(
      int expectedOnDiskSizeWithoutHeader) throws IOException {
    if (onDiskSizeWithoutHeader != expectedOnDiskSizeWithoutHeader) {
      String blockInfoMsg =
          "Block offset: " + offset + ", data starts with: "
              + Bytes.toStringBinary(buf.array(), buf.arrayOffset(),
                  Math.min(32, buf.limit()));
      throw new IOException("On-disk size without header provided is "
          + expectedOnDiskSizeWithoutHeader + ", but block "
          + "header contains " + onDiskSizeWithoutHeader + ". " +
          blockInfoMsg);
    }
  }

  /**
   * Always allocates a new buffer of the correct size. Copies header bytes
   * from the existing buffer. Does not change header fields. Reserves room
   * in the new buffer for the next block's header if {@code extraBytes} is
   * true.
   */
  private void allocateBuffer(boolean extraBytes) {
    int cksumBytes = totalChecksumBytes();
    int capacityNeeded = headerSize() + uncompressedSizeWithoutHeader +
        cksumBytes +
        (extraBytes ? headerSize() : 0);

    ByteBuffer newBuf = ByteBuffer.allocate(capacityNeeded);

    // Copy the header over.
    System.arraycopy(buf.array(), buf.arrayOffset(), newBuf.array(),
        newBuf.arrayOffset(), headerSize());

    buf = newBuf;
    buf.limit(headerSize() + uncompressedSizeWithoutHeader + cksumBytes);
  }

  /** An additional sanity check in case the block does not use compression. */
  public void assumeUncompressed() throws IOException {
    if (onDiskSizeWithoutHeader != uncompressedSizeWithoutHeader +
        totalChecksumBytes()) {
      throw new IOException("Using no compression but "
          + "onDiskSizeWithoutHeader=" + onDiskSizeWithoutHeader + ", "
          + "uncompressedSizeWithoutHeader=" + uncompressedSizeWithoutHeader
          + ", numChecksumBytes=" + totalChecksumBytes());
    }
  }

  /**
   * @param expectedType the expected type of this block
   * @throws IOException if this block's type is different than expected
   */
  public void expectType(BlockType expectedType) throws IOException {
    if (blockType != expectedType) {
      throw new IOException("Invalid block type: expected=" + expectedType
          + ", actual=" + blockType);
    }
  }

  /** @return the offset of this block in the file it was read from */
  public long getOffset() {
    if (offset < 0) {
      throw new IllegalStateException(
          "HFile block offset not initialized properly");
    }
    return offset;
  }

  /**
   * @return a byte stream reading this block's data and checksum bytes,
   *         header excluded
   */
  public DataInputStream getByteStream() {
    return new DataInputStream(new ByteArrayInputStream(buf.array(),
        buf.arrayOffset() + headerSize(), buf.limit() - headerSize()));
  }

  @Override
  public long heapSize() {
    long size = ClassSize.align(
        ClassSize.OBJECT +
        // Two references: blockType and buf.
        2 * ClassSize.REFERENCE +
        // On-disk size, uncompressed size, bytes per checksum, on-disk data
        // size, minor version, and next block's on-disk size.
        6 * Bytes.SIZEOF_INT +
        // Checksum type.
        1 * Bytes.SIZEOF_BYTE +
        // Previous block offset and this block's offset.
        2 * Bytes.SIZEOF_LONG +
        // Whether the block includes memstore timestamps.
        Bytes.SIZEOF_BOOLEAN
    );

    if (buf != null) {
      // Deep overhead of the byte buffer, including its backing array.
      size += ClassSize.align(buf.capacity() + BYTE_BUFFER_HEAP_SIZE);
    }

    return ClassSize.align(size);
  }
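
  /**
   * Reads from an input stream. Analogous to
   * {@link IOUtils#readFully(InputStream, byte[], int, int)}, but specifies
   * a number of "extra" bytes that would be desirable but not absolutely
   * necessary to read.
   *
   * @param in the input stream to read from
   * @param buf the buffer to read into
   * @param bufOffset the destination offset in the buffer
   * @param necessaryLen the number of bytes that are absolutely necessary
   *          to read
   * @param extraLen the number of extra bytes that would be nice to read
   * @return true if succeeded reading the extra bytes
   * @throws IOException if failed to read the necessary bytes
   */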
  public static boolean readWithExtra(InputStream in, byte[] buf,
      int bufOffset, int necessaryLen, int extraLen) throws IOException {
    int bytesRemaining = necessaryLen + extraLen;
    while (bytesRemaining > 0) {
      int ret = in.read(buf, bufOffset, bytesRemaining);
      if (ret == -1 && bytesRemaining <= extraLen) {
        // We could not read the "extra data", but that is OK.
        break;
      }

      if (ret < 0) {
        throw new IOException("Premature EOF from inputStream (read "
            + "returned " + ret + ", was trying to read " + necessaryLen
            + " necessary bytes and " + extraLen + " extra bytes, "
            + "successfully read "
            + (necessaryLen + extraLen - bytesRemaining) + ")");
      }
      bufOffset += ret;
      bytesRemaining -= ret;
    }
    return bytesRemaining <= 0;
  }

  /**
   * @return the on-disk size of the next block, including the header, or -1
   *         if unknown
   */
  public int getNextBlockOnDiskSizeWithHeader() {
    return nextBlockOnDiskSizeWithHeader;
  }
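
  /**
   * Unified version 2 {@link HFile} block writer. The intended usage pattern
   * is as follows:
   * <ol>
   * <li>Construct an {@link HFileBlock.Writer}, providing a compression
   * algorithm.
   * <li>Call {@link Writer#startWriting} and get a data stream to write to.
   * <li>Write your data into the stream.
   * <li>Call {@link Writer#writeHeaderAndData(FSDataOutputStream)} to store
   * the serialized block into an external stream.
   * <li>Repeat to write more blocks.
   * </ol>
   */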
  public static class Writer {

    private enum State {
      INIT,
      WRITING,
      BLOCK_READY
    }

    /** Writer state. Used to ensure the correct usage protocol. */
    private State state = State.INIT;

    /** Data block encoder used for data blocks. */
    private final HFileDataBlockEncoder dataBlockEncoder;

    private HFileBlockEncodingContext dataBlockEncodingCtx;

    /** Block encoding context used for non-data blocks. */
    private HFileBlockDefaultEncodingContext defaultBlockEncodingCtx;

    /**
     * The stream we use to accumulate data in uncompressed format for each
     * block. We reset this stream at the end of each block and reuse it. The
     * header is written as the first
     * {@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes into this stream.
     */
    private ByteArrayOutputStream baosInMemory;

    /**
     * Current block type. Set in {@link #startWriting(BlockType)}. Could be
     * changed in {@link #encodeDataBlockForDisk()} from
     * {@link BlockType#DATA} to {@link BlockType#ENCODED_DATA}.
     */
    private BlockType blockType;

    /**
     * The stream the user writes uncompressed bytes into during the
     * "writing" state; wraps {@link #baosInMemory}.
     */
    private DataOutputStream userDataStream;

    /**
     * Bytes to be written to the file system, including the header.
     * Compressed if compression is turned on. The checksums are kept
     * separately in {@link #onDiskChecksum}.
     */
    private byte[] onDiskBytesWithHeader;

    /**
     * Checksums computed over {@link #onDiskBytesWithHeader}, written to the
     * file system immediately after the block data.
     */
    private byte[] onDiskChecksum;

    /**
     * Valid in the "block ready" state. Contains the header and the
     * uncompressed (but potentially encoded) bytes, to be stored in the
     * block cache when cache-on-write is enabled.
     */
    private byte[] uncompressedBytesWithHeader;

    /**
     * Current block's start offset in the output stream, or -1 if the block
     * has not been written to a stream yet.
     */
    private long startOffset;

    /**
     * Offset of the previous block of the same {@link BlockType}, or -1 if
     * none has been written yet. Indexed by block type id.
     */
    private long[] prevOffsetByType;

    /** The offset of the previous block of the same type. */
    private long prevOffset;

    /** Whether we are including memstore timestamps after every key/value. */
    private boolean includesMemstoreTS;

    /** Checksum settings for the blocks this writer produces. */
    private ChecksumType checksumType;
    private int bytesPerChecksum;

    /**
     * @param compressionAlgorithm compression algorithm to use
     * @param dataBlockEncoder data block encoding algorithm to use, or null
     *          for no encoding
     * @param includesMemstoreTS whether a memstore timestamp follows every
     *          key/value
     * @param checksumType type of checksum
     * @param bytesPerChecksum the number of bytes covered by each checksum
     */
    public Writer(Compression.Algorithm compressionAlgorithm,
        HFileDataBlockEncoder dataBlockEncoder, boolean includesMemstoreTS,
        ChecksumType checksumType, int bytesPerChecksum) {
      this.dataBlockEncoder = dataBlockEncoder != null
          ? dataBlockEncoder : NoOpDataBlockEncoder.INSTANCE;
      defaultBlockEncodingCtx = new HFileBlockDefaultEncodingContext(
          compressionAlgorithm, null, HConstants.HFILEBLOCK_DUMMY_HEADER);
      dataBlockEncodingCtx =
          this.dataBlockEncoder.newDataBlockEncodingContext(
              compressionAlgorithm, HConstants.HFILEBLOCK_DUMMY_HEADER);

      if (bytesPerChecksum < HConstants.HFILEBLOCK_HEADER_SIZE) {
        throw new RuntimeException("Unsupported value of bytesPerChecksum. " +
            " Minimum is " + HConstants.HFILEBLOCK_HEADER_SIZE +
            " but the configured value is " + bytesPerChecksum);
      }

      baosInMemory = new ByteArrayOutputStream();

      prevOffsetByType = new long[BlockType.values().length];
      for (int i = 0; i < prevOffsetByType.length; ++i) {
        prevOffsetByType[i] = -1;
      }

      this.includesMemstoreTS = includesMemstoreTS;
      this.checksumType = checksumType;
      this.bytesPerChecksum = bytesPerChecksum;
    }

    /**
     * Starts writing into the block. The previous block's data is discarded.
     *
     * @return the stream the user can write their data into
     */
    public DataOutputStream startWriting(BlockType newBlockType)
        throws IOException {
      if (state == State.BLOCK_READY && startOffset != -1) {
        // We had a previous block that was written to a stream at a specific
        // offset. Save that offset as the last offset of a block of that
        // type.
        prevOffsetByType[blockType.getId()] = startOffset;
      }

      startOffset = -1;
      blockType = newBlockType;

      baosInMemory.reset();
      baosInMemory.write(HConstants.HFILEBLOCK_DUMMY_HEADER);

      state = State.WRITING;

      // We will compress it later in finishBlock().
      userDataStream = new DataOutputStream(baosInMemory);
      return userDataStream;
    }

    /**
     * Returns the stream for the user to write to. The block writer takes
     * care of handling compression and buffering for caching on write. Can
     * only be called in the "writing" state.
     *
     * @return the data output stream for the user to write to
     */
    DataOutputStream getUserDataStream() {
      expectState(State.WRITING);
      return userDataStream;
    }

    /**
     * Transitions the block writer from the "writing" state to the "block
     * ready" state. Does nothing if a block is already finished.
     */
    private void ensureBlockReady() throws IOException {
      Preconditions.checkState(state != State.INIT,
          "Unexpected state: " + state);

      if (state == State.BLOCK_READY) {
        return;
      }

      // This will set state to BLOCK_READY.
      finishBlock();
    }

    /**
     * An internal method that flushes the user data stream, encodes and
     * compresses the accumulated bytes as needed, writes the header into
     * both the on-disk and the uncompressed (cache-on-write) copies, and
     * generates the checksums. State transitions must be managed by the
     * caller.
     */
    private void finishBlock() throws IOException {
      userDataStream.flush();

      // This does an array copy, so it is safe to cache this byte array.
      uncompressedBytesWithHeader = baosInMemory.toByteArray();
      prevOffset = prevOffsetByType[blockType.getId()];

      // We need to set state before we can package the block up for
      // cache-on-write. In a way, the block is ready, but not yet encoded or
      // compressed.
      state = State.BLOCK_READY;
      if (blockType == BlockType.DATA) {
        encodeDataBlockForDisk();
      } else {
        defaultBlockEncodingCtx.compressAfterEncodingWithBlockType(
            uncompressedBytesWithHeader, blockType);
        onDiskBytesWithHeader =
            defaultBlockEncodingCtx.getOnDiskBytesWithHeader();
      }

      int numBytes = (int) ChecksumUtil.numBytes(
          onDiskBytesWithHeader.length,
          bytesPerChecksum);

      // Put the header into the on-disk bytes.
      putHeader(onDiskBytesWithHeader, 0,
          onDiskBytesWithHeader.length + numBytes,
          uncompressedBytesWithHeader.length, onDiskBytesWithHeader.length);
      // Set the header on the uncompressed bytes as well, for cache-on-write.
      putHeader(uncompressedBytesWithHeader, 0,
          onDiskBytesWithHeader.length + numBytes,
          uncompressedBytesWithHeader.length, onDiskBytesWithHeader.length);

      onDiskChecksum = new byte[numBytes];
      ChecksumUtil.generateChecksums(
          onDiskBytesWithHeader, 0, onDiskBytesWithHeader.length,
          onDiskChecksum, 0, checksumType, bytesPerChecksum);
    }

    /**
     * Encodes this block if it is a data block and encoding is turned on.
     */
    private void encodeDataBlockForDisk() throws IOException {
      // Wrap the raw key/value bytes, skipping the header.
      ByteBuffer rawKeyValues =
          ByteBuffer.wrap(uncompressedBytesWithHeader, HConstants.HFILEBLOCK_HEADER_SIZE,
              uncompressedBytesWithHeader.length - HConstants.HFILEBLOCK_HEADER_SIZE).slice();

      // Do the encoding.
      dataBlockEncoder.beforeWriteToDisk(rawKeyValues,
          includesMemstoreTS, dataBlockEncodingCtx, blockType);

      uncompressedBytesWithHeader =
          dataBlockEncodingCtx.getUncompressedBytesWithHeader();
      onDiskBytesWithHeader =
          dataBlockEncodingCtx.getOnDiskBytesWithHeader();
      blockType = dataBlockEncodingCtx.getBlockType();
    }

    /**
     * Puts the header into the given byte array at the given offset.
     *
     * @param onDiskSize size of the block on disk: header + data + checksums
     * @param uncompressedSize size of the block after decompression (but
     *          before optional data block decoding), including header
     * @param onDiskDataSize size of the block on disk with header and data,
     *          but not including the checksums
     */
    private void putHeader(byte[] dest, int offset, int onDiskSize,
        int uncompressedSize, int onDiskDataSize) {
      offset = blockType.put(dest, offset);
      offset = Bytes.putInt(dest, offset, onDiskSize - HConstants.HFILEBLOCK_HEADER_SIZE);
      offset = Bytes.putInt(dest, offset, uncompressedSize - HConstants.HFILEBLOCK_HEADER_SIZE);
      offset = Bytes.putLong(dest, offset, prevOffset);
      offset = Bytes.putByte(dest, offset, checksumType.getCode());
      offset = Bytes.putInt(dest, offset, bytesPerChecksum);
      Bytes.putInt(dest, offset, onDiskDataSize);
    }

    /**
     * Writes the header and data of this block into the given stream, and
     * records the offset the block was written at so the next block of the
     * same type can reference it.
     *
     * @param out the output stream to write the block to
     * @throws IOException if the block was already written at a different
     *           offset
     */
    public void writeHeaderAndData(FSDataOutputStream out) throws IOException {
      long offset = out.getPos();
      if (startOffset != -1 && offset != startOffset) {
        throw new IOException("A " + blockType + " block written to a "
            + "stream twice, first at offset " + startOffset + ", then at "
            + offset);
      }
      startOffset = offset;

      finishBlockAndWriteHeaderAndData((DataOutputStream) out);
    }

    /**
     * Writes the header and the compressed data of this block (or the
     * uncompressed data when not using compression) into the given stream.
     * Can be called in the "writing" state or in the "block ready" state. If
     * called in the "writing" state, transitions the writer to the "block
     * ready" state.
     *
     * @param out the output stream to write the data to
     */
    private void finishBlockAndWriteHeaderAndData(DataOutputStream out)
        throws IOException {
      ensureBlockReady();
      out.write(onDiskBytesWithHeader);
      out.write(onDiskChecksum);
    }

    /**
     * Returns the header followed by the on-disk data and checksums as a
     * byte array, exactly as they would be stored on disk. Can be called in
     * the "writing" state or in the "block ready" state; if called in the
     * "writing" state, transitions the writer to the "block ready" state.
     *
     * @return header, data, and checksums as stored on disk
     */
    byte[] getHeaderAndDataForTest() throws IOException {
      ensureBlockReady();
      // This is not very optimal, because we are doing an extra copy. But
      // this method is used only by unit tests.
      byte[] output =
          new byte[onDiskBytesWithHeader.length
              + onDiskChecksum.length];
      System.arraycopy(onDiskBytesWithHeader, 0, output, 0,
          onDiskBytesWithHeader.length);
      System.arraycopy(onDiskChecksum, 0, output,
          onDiskBytesWithHeader.length, onDiskChecksum.length);
      return output;
    }

    /**
     * Releases resources used by this writer.
     */
    public void release() {
      if (dataBlockEncodingCtx != null) {
        dataBlockEncodingCtx.close();
        dataBlockEncodingCtx = null;
      }
      if (defaultBlockEncodingCtx != null) {
        defaultBlockEncodingCtx.close();
        defaultBlockEncodingCtx = null;
      }
    }

    /**
     * Returns the on-disk size of the data portion of the block. This is the
     * compressed size if compression is enabled. Header and checksums are
     * not compressed, and their size is not included in the return value.
     * Can only be called in the "block ready" state.
     *
     * @return the on-disk size of the block, not including the header
     */
    int getOnDiskSizeWithoutHeader() {
      expectState(State.BLOCK_READY);
      return onDiskBytesWithHeader.length + onDiskChecksum.length
          - HConstants.HFILEBLOCK_HEADER_SIZE;
    }

    /**
     * Returns the on-disk size of the block. Can only be called in the
     * "block ready" state.
     *
     * @return the on-disk size of the block, including header and checksums
     */
    int getOnDiskSizeWithHeader() {
      expectState(State.BLOCK_READY);
      return onDiskBytesWithHeader.length + onDiskChecksum.length;
    }

    /**
     * @return the uncompressed size of the block data, not including the
     *         header
     */
    int getUncompressedSizeWithoutHeader() {
      expectState(State.BLOCK_READY);
      return uncompressedBytesWithHeader.length - HConstants.HFILEBLOCK_HEADER_SIZE;
    }

    /**
     * @return the uncompressed size of the block data, including the header
     */
    int getUncompressedSizeWithHeader() {
      expectState(State.BLOCK_READY);
      return uncompressedBytesWithHeader.length;
    }

    /** @return true if a block is being written */
    public boolean isWriting() {
      return state == State.WRITING;
    }

    /**
     * Returns the number of bytes written into the current block so far, or
     * zero if not writing a block at the moment. Note that this also returns
     * zero in the "block ready" state.
     *
     * @return the number of bytes written
     */
    public int blockSizeWritten() {
      if (state != State.WRITING) {
        return 0;
      }
      return userDataStream.size();
    }

    /**
     * Returns the header followed by the uncompressed data, even if using
     * compression. This is needed for storing uncompressed blocks in the
     * block cache. Can only be called in the "block ready" state.
     *
     * @return uncompressed block bytes for caching on write
     */
    ByteBuffer getUncompressedBufferWithHeader() {
      expectState(State.BLOCK_READY);
      return ByteBuffer.wrap(uncompressedBytesWithHeader);
    }

    private void expectState(State expectedState) {
      if (state != expectedState) {
        throw new IllegalStateException("Expected state: " + expectedState +
            ", actual state: " + state);
      }
    }

    /**
     * Takes the given {@link BlockWritable} instance, creates a new block of
     * its appropriate type, writes the writable into this block, and flushes
     * the block into the output stream.
     *
     * @param bw the block-writable object to write as a block
     * @param out the file system output stream
     */
    public void writeBlock(BlockWritable bw, FSDataOutputStream out)
        throws IOException {
      bw.writeToBlock(startWriting(bw.getBlockType()));
      writeHeaderAndData(out);
    }
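
    /**
     * Creates a new HFileBlock for caching on write. Checksums have already
     * been validated, so the byte buffer passed into the constructor of this
     * newly created block does not have checksum data even though the header
     * minor version is MINOR_VERSION_WITH_CHECKSUM. This is indicated by
     * setting a 0 value in bytesPerChecksum.
     */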
    public HFileBlock getBlockForCaching() {
      return new HFileBlock(blockType, getOnDiskSizeWithoutHeader(),
          getUncompressedSizeWithoutHeader(), prevOffset,
          getUncompressedBufferWithHeader(), DONT_FILL_HEADER, startOffset,
          includesMemstoreTS, MINOR_VERSION_WITH_CHECKSUM,
          0, ChecksumType.NULL.getCode(),
          onDiskBytesWithHeader.length + onDiskChecksum.length);
    }
  }

  /** Something that can be written into a block. */
  public interface BlockWritable {

    /** The type of block this data should use. */
    BlockType getBlockType();

    /**
     * Writes the block to the provided stream. Must not write any magic
     * records.
     *
     * @param out a stream to write uncompressed data into
     */
    void writeToBlock(DataOutput out) throws IOException;
  }
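
  /**
   * An interface for iterating through a sequence of {@link HFileBlock}s. A
   * typical loop (a sketch; assumes {@code reader} is an {@link FSReader}):
   * <pre>
   * BlockIterator it = reader.blockRange(startOffset, endOffset);
   * HFileBlock b;
   * while ((b = it.nextBlock()) != null) {
   *   // process the block
   * }
   * </pre>
   */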
  public interface BlockIterator {

    /**
     * Gets the next block, or null if there are no more blocks to iterate.
     */
    HFileBlock nextBlock() throws IOException;

    /**
     * Similar to {@link #nextBlock()}, but checks the block type and throws
     * an exception if it is incorrect.
     */
    HFileBlock nextBlockWithBlockType(BlockType blockType) throws IOException;
  }

  /** A full-fledged reader with an iteration ability. */
  public interface FSReader {

    /**
     * Reads the block at the given offset in the file with the given on-disk
     * size and uncompressed size.
     *
     * @param offset the offset in the file to read at
     * @param onDiskSize the on-disk size of the entire block, including all
     *          applicable headers, or -1 if unknown
     * @param uncompressedSize the uncompressed size of the compressed part
     *          of the block, or -1 if unknown
     * @param pread whether to use a positional read
     * @return the newly read block
     */
    HFileBlock readBlockData(long offset, long onDiskSize,
        int uncompressedSize, boolean pread) throws IOException;

    /**
     * Creates a block iterator over the given portion of the {@link HFile}.
     * The iterator returns blocks starting with offset such that
     * startOffset <= offset < endOffset.
     *
     * @param startOffset the offset of the block to start iteration with
     * @param endOffset the offset to end iteration at (exclusive)
     * @return an iterator over blocks between the two given offsets
     */
    BlockIterator blockRange(long startOffset, long endOffset);

    /** Closes the backing streams. */
    void closeStreams() throws IOException;
  }

  /**
   * A common implementation of some methods of {@link FSReader} and some
   * tools for implementing HFile format version-specific block readers.
   */
  private abstract static class AbstractFSReader implements FSReader {

    /** Compression algorithm used by the {@link HFile}. */
    protected Compression.Algorithm compressAlgo;

    /** The size of the file we are reading from, i.e. the file size. */
    protected long fileSize;

    /** The minor version of this reader. */
    private int minorVersion;

    /** The size of the header, depending on the minor version. */
    protected final int hdrSize;

    /** The filesystem used to access data. */
    protected HFileSystem hfs;

    /** The path (if any) where this file is coming from. */
    protected Path path;

    private final Lock streamLock = new ReentrantLock();

    /** The default buffer size for our buffered streams. */
    public static final int DEFAULT_BUFFER_SIZE = 1 << 20;

    public AbstractFSReader(Algorithm compressAlgo, long fileSize, int minorVersion,
        HFileSystem hfs, Path path) throws IOException {
      this.compressAlgo = compressAlgo;
      this.fileSize = fileSize;
      this.minorVersion = minorVersion;
      this.hfs = hfs;
      this.path = path;
      this.hdrSize = headerSize(minorVersion);
    }

    @Override
    public BlockIterator blockRange(final long startOffset,
        final long endOffset) {
      return new BlockIterator() {
        private long offset = startOffset;

        @Override
        public HFileBlock nextBlock() throws IOException {
          if (offset >= endOffset) {
            return null;
          }
          HFileBlock b = readBlockData(offset, -1, -1, false);
          offset += b.getOnDiskSizeWithHeader();
          return b;
        }

        @Override
        public HFileBlock nextBlockWithBlockType(BlockType blockType)
            throws IOException {
          HFileBlock blk = nextBlock();
          if (blk == null || blk.getBlockType() != blockType) {
            throw new IOException("Expected block of type " + blockType
                + " but found " + (blk == null ? null : blk.getBlockType()));
          }
          return blk;
        }
      };
    }
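
    /**
     * Does a positional read or a seek and read into the given buffer.
     * Returns the on-disk size of the next block, or -1 if it could not be
     * determined.
     *
     * @param istream the input source of data
     * @param dest destination buffer
     * @param destOffset offset in the destination buffer
     * @param size size of the block to be read
     * @param peekIntoNextBlock whether to read the next block's on-disk size
     * @param fileOffset position in the stream to read at
     * @param pread whether we should do a positional read
     * @return the on-disk size of the next block with header size included,
     *         or -1 if it could not be determined
     */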
    protected int readAtOffset(FSDataInputStream istream,
        byte[] dest, int destOffset, int size,
        boolean peekIntoNextBlock, long fileOffset, boolean pread)
        throws IOException {
      if (peekIntoNextBlock &&
          destOffset + size + hdrSize > dest.length) {
        // We are asked to read the next block's header as well, but there is
        // not enough room in the array.
        throw new IOException("Attempted to read " + size + " bytes and " +
            hdrSize + " bytes of next header into a " + dest.length +
            "-byte array at offset " + destOffset);
      }

      if (!pread && streamLock.tryLock()) {
        // Seek + read. Better for scanning.
        try {
          istream.seek(fileOffset);

          long realOffset = istream.getPos();
          if (realOffset != fileOffset) {
            throw new IOException("Tried to seek to " + fileOffset + " to "
                + "read " + size + " bytes, but pos=" + realOffset
                + " after seek");
          }

          if (!peekIntoNextBlock) {
            IOUtils.readFully(istream, dest, destOffset, size);
            return -1;
          }

          // Try to read the next block's header.
          if (!readWithExtra(istream, dest, destOffset, size, hdrSize)) {
            return -1;
          }
        } finally {
          streamLock.unlock();
        }
      } else {
        // Positional read. Better for random reads; also safe when multiple
        // threads share the same stream.
        int extraSize = peekIntoNextBlock ? hdrSize : 0;

        int ret = istream.read(fileOffset, dest, destOffset, size + extraSize);
        if (ret < size) {
          throw new IOException("Positional read of " + size + " bytes " +
              "failed at offset " + fileOffset + " (returned " + ret + ")");
        }

        if (ret == size || ret < size + extraSize) {
          // Could not read the next block's header, or did not try.
          return -1;
        }
      }

      assert peekIntoNextBlock;
      return Bytes.toInt(dest, destOffset + size + BlockType.MAGIC_LENGTH) +
          hdrSize;
    }

    /**
     * @return the minor version of this reader
     */
    protected int getMinorVersion() {
      return minorVersion;
    }
  }

  /**
   * We always prefetch the header of the next block, so that we know its
   * on-disk size in advance and can read it in a single operation.
   */
  private static class PrefetchedHeader {
    long offset = -1;
    byte[] header = new byte[HConstants.HFILEBLOCK_HEADER_SIZE];
    ByteBuffer buf = ByteBuffer.wrap(header, 0, HConstants.HFILEBLOCK_HEADER_SIZE);
  }

  /** Reads version 2 blocks from the filesystem. */
  static class FSReaderV2 extends AbstractFSReader {

    /**
     * The file system stream of the underlying {@link HFile} that does or
     * does not do checksum validation in the filesystem.
     */
    protected FSDataInputStreamWrapper streamWrapper;

    /** Whether we include memstore timestamps in data blocks. */
    protected boolean includesMemstoreTS;

    /** Data block encoding used to read from the file. */
    protected HFileDataBlockEncoder dataBlockEncoder =
        NoOpDataBlockEncoder.INSTANCE;

    private HFileBlockDecodingContext encodedBlockDecodingCtx;

    private HFileBlockDefaultDecodingContext defaultDecodingCtx;

    private ThreadLocal<PrefetchedHeader> prefetchedHeaderForThread =
        new ThreadLocal<PrefetchedHeader>() {
          @Override
          public PrefetchedHeader initialValue() {
            return new PrefetchedHeader();
          }
        };

    public FSReaderV2(FSDataInputStreamWrapper stream, Algorithm compressAlgo, long fileSize,
        int minorVersion, HFileSystem hfs, Path path) throws IOException {
      super(compressAlgo, fileSize, minorVersion, hfs, path);
      this.streamWrapper = stream;

      // Older minor versions do not support HBase-level checksums.
      boolean forceNoHBaseChecksum = (this.getMinorVersion() < MINOR_VERSION_WITH_CHECKSUM);
      this.streamWrapper.prepareForBlockReader(forceNoHBaseChecksum);

      defaultDecodingCtx =
          new HFileBlockDefaultDecodingContext(compressAlgo);
      encodedBlockDecodingCtx =
          new HFileBlockDefaultDecodingContext(compressAlgo);
    }

    /**
     * A constructor that reads files with the latest minor version. This is
     * used by unit tests only.
     */
    FSReaderV2(FSDataInputStream istream, Algorithm compressAlgo,
        long fileSize) throws IOException {
      this(new FSDataInputStreamWrapper(istream), compressAlgo, fileSize,
          HFileReaderV2.MAX_MINOR_VERSION, null, null);
    }
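
    /**
     * Reads a version 2 block. Retries once with HDFS-level checksum
     * verification if HBase-level checksum verification fails.
     *
     * @param offset the offset in the stream to read at
     * @param onDiskSizeWithHeaderL the on-disk size of the block, including
     *          the header, or -1 if unknown
     * @param uncompressedSize the uncompressed size of the block. Always
     *          expected to be -1 in version 2; only used by version 1
     *          readers.
     * @param pread whether to use a positional read
     */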
    @Override
    public HFileBlock readBlockData(long offset, long onDiskSizeWithHeaderL,
        int uncompressedSize, boolean pread) throws IOException {

      // Get a copy of the current state of whether to validate HBase
      // checksums or not for this read call. This is not thread-safe, but
      // the one constraint is that if we decide to skip HBase checksum
      // verification, we are guaranteed to use HDFS checksum verification.
      boolean doVerificationThruHBaseChecksum = streamWrapper.shouldUseHBaseChecksum();
      FSDataInputStream is = streamWrapper.getStream(doVerificationThruHBaseChecksum);

      HFileBlock blk = readBlockDataInternal(is, offset,
          onDiskSizeWithHeaderL,
          uncompressedSize, pread,
          doVerificationThruHBaseChecksum);
      if (blk == null) {
        HFile.LOG.warn("HBase checksum verification failed for file " +
            path + " at offset " +
            offset + " filesize " + fileSize +
            ". Retrying read with HDFS checksums turned on...");

        if (!doVerificationThruHBaseChecksum) {
          String msg = "HBase checksum verification failed for file " +
              path + " at offset " +
              offset + " filesize " + fileSize +
              " but this cannot happen because doVerify is " +
              doVerificationThruHBaseChecksum;
          HFile.LOG.warn(msg);
          throw new IOException(msg); // cannot happen case here
        }
        HFile.checksumFailures.incrementAndGet(); // update metrics

        // If we have a checksum failure, we fall back into a mode where the
        // next few reads use HDFS-level checksums. We aim to make the next
        // CHECKSUM_VERIFICATION_NUM_IO_THRESHOLD reads avoid HBase checksum
        // verification, but since this value is set without holding a lock,
        // we might actually use more reads than that with HDFS checksums.
        is = this.streamWrapper.fallbackToFsChecksum(CHECKSUM_VERIFICATION_NUM_IO_THRESHOLD);
        doVerificationThruHBaseChecksum = false;
        blk = readBlockDataInternal(is, offset, onDiskSizeWithHeaderL,
            uncompressedSize, pread,
            doVerificationThruHBaseChecksum);
        if (blk != null) {
          HFile.LOG.warn("HDFS checksum verification succeeded for file " +
              path + " at offset " +
              offset + " filesize " + fileSize);
        }
      }
      if (blk == null && !doVerificationThruHBaseChecksum) {
        String msg = "readBlockData failed, possibly due to " +
            "checksum verification failed for file " + path +
            " at offset " + offset + " filesize " + fileSize;
        HFile.LOG.warn(msg);
        throw new IOException(msg);
      }

      // The block was read successfully. Inform the stream wrapper, which
      // may re-enable HBase checksum verification if it had been temporarily
      // switched off after an earlier mismatch.
      streamWrapper.checksumOk();
      return blk;
    }
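
    /**
     * Reads a version 2 block.
     *
     * @param offset the offset in the stream to read at
     * @param onDiskSizeWithHeaderL the on-disk size of the block, including
     *          the header, or -1 if unknown
     * @param uncompressedSize the uncompressed size of the block. Always
     *          expected to be -1 in version 2; only used by version 1
     *          readers.
     * @param pread whether to use a positional read
     * @param verifyChecksum whether to verify checksums at the HBase level;
     *          if switched off, HDFS-level checksums are used instead
     * @return the block read, or null on an HBase checksum mismatch
     */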
    private HFileBlock readBlockDataInternal(FSDataInputStream is, long offset,
        long onDiskSizeWithHeaderL, int uncompressedSize, boolean pread,
        boolean verifyChecksum) throws IOException {
      if (offset < 0) {
        throw new IOException("Invalid offset=" + offset + " trying to read "
            + "block (onDiskSize=" + onDiskSizeWithHeaderL
            + ", uncompressedSize=" + uncompressedSize + ")");
      }
      if (uncompressedSize != -1) {
        throw new IOException("Version 2 block reader API does not need " +
            "the uncompressed size parameter");
      }

      if ((onDiskSizeWithHeaderL < hdrSize && onDiskSizeWithHeaderL != -1)
          || onDiskSizeWithHeaderL >= Integer.MAX_VALUE) {
        throw new IOException("Invalid onDisksize=" + onDiskSizeWithHeaderL
            + ": expected to be at least " + hdrSize
            + " and at most " + Integer.MAX_VALUE + ", or -1 (offset="
            + offset + ", uncompressedSize=" + uncompressedSize + ")");
      }

      int onDiskSizeWithHeader = (int) onDiskSizeWithHeaderL;

      // See if we can avoid reading the header. This is desirable, because
      // we will not incur a backward seek operation if we have already read
      // this block's header as part of the previous read's look-ahead.
      PrefetchedHeader prefetchedHeader = prefetchedHeaderForThread.get();
      ByteBuffer headerBuf = prefetchedHeader.offset == offset ?
          prefetchedHeader.buf : null;

      int nextBlockOnDiskSize = 0;
      // Allocated with enough space to also fit the next block's header.
      byte[] onDiskBlock = null;

      HFileBlock b = null;
      if (onDiskSizeWithHeader > 0) {
        // We know the total on-disk size but not the uncompressed size. Read
        // the entire block into memory, then parse the header and decompress
        // from memory if using compression. This code path is used when
        // doing a random read operation relying on the block index, as well
        // as when the client knows the on-disk size from peeking into the
        // next block's header (i.e. this block's header) while reading the
        // previous block. This is the faster and more preferable case.

        // Size to skip in case we have already read the header.
        int preReadHeaderSize = headerBuf == null ? 0 : hdrSize;
        onDiskBlock = new byte[onDiskSizeWithHeader + hdrSize];
        nextBlockOnDiskSize = readAtOffset(is, onDiskBlock,
            preReadHeaderSize, onDiskSizeWithHeader - preReadHeaderSize,
            true, offset + preReadHeaderSize, pread);
        if (headerBuf != null) {
          // The header has been read when reading the previous block; copy
          // it to this block's header.
          System.arraycopy(headerBuf.array(),
              headerBuf.arrayOffset(), onDiskBlock, 0, hdrSize);
        } else {
          headerBuf = ByteBuffer.wrap(onDiskBlock, 0, hdrSize);
        }

        // Parse the block header from the header bytes we already have.
        try {
          b = new HFileBlock(headerBuf, getMinorVersion());
        } catch (IOException ex) {
          // Seen in load testing. Provide comprehensive debug info.
          throw new IOException("Failed to read compressed block at "
              + offset
              + ", onDiskSizeWithoutHeader="
              + onDiskSizeWithHeader
              + ", preReadHeaderSize="
              + hdrSize
              + ", header.length="
              + prefetchedHeader.header.length
              + ", header bytes: "
              + Bytes.toStringBinary(prefetchedHeader.header, 0,
                  hdrSize), ex);
        }

        int onDiskSizeWithoutHeader = onDiskSizeWithHeader - hdrSize;
        assert onDiskSizeWithoutHeader >= 0;
        b.validateOnDiskSizeWithoutHeader(onDiskSizeWithoutHeader);
      } else {
        // We do not know the on-disk size. Read the header first, determine
        // the on-disk size from it, and read the remaining data, potentially
        // with the next block's header included.

        if (headerBuf == null) {
          // Unfortunately, we still have to do a separate read operation to
          // read the header.
          headerBuf = ByteBuffer.allocate(hdrSize);
          readAtOffset(is, headerBuf.array(), headerBuf.arrayOffset(),
              hdrSize, false, offset, pread);
        }

        b = new HFileBlock(headerBuf, getMinorVersion());
        onDiskBlock = new byte[b.getOnDiskSizeWithHeader() + hdrSize];
        System.arraycopy(headerBuf.array(),
            headerBuf.arrayOffset(), onDiskBlock, 0, hdrSize);
        nextBlockOnDiskSize =
            readAtOffset(is, onDiskBlock, hdrSize, b.getOnDiskSizeWithHeader()
                - hdrSize, true, offset + hdrSize, pread);
        onDiskSizeWithHeader = b.onDiskSizeWithoutHeader + hdrSize;
      }

      boolean isCompressed =
          compressAlgo != null
              && compressAlgo != Compression.Algorithm.NONE;
      if (!isCompressed) {
        b.assumeUncompressed();
      }

      if (verifyChecksum &&
          !validateBlockChecksum(b, onDiskBlock, hdrSize)) {
        return null; // checksum mismatch
      }

      if (isCompressed) {
        // This will allocate a new buffer but keep the header bytes.
        b.allocateBuffer(nextBlockOnDiskSize > 0);
        if (b.blockType == BlockType.ENCODED_DATA) {
          encodedBlockDecodingCtx.prepareDecoding(b.getOnDiskSizeWithoutHeader(),
              b.getUncompressedSizeWithoutHeader(), b.getBufferWithoutHeader(), onDiskBlock,
              hdrSize);
        } else {
          defaultDecodingCtx.prepareDecoding(b.getOnDiskSizeWithoutHeader(),
              b.getUncompressedSizeWithoutHeader(), b.getBufferWithoutHeader(), onDiskBlock,
              hdrSize);
        }
        if (nextBlockOnDiskSize > 0) {
          // Copy the next block's header into the tail of this block's
          // buffer.
          System.arraycopy(onDiskBlock, onDiskSizeWithHeader, b.buf.array(),
              b.buf.arrayOffset() + hdrSize
                  + b.uncompressedSizeWithoutHeader + b.totalChecksumBytes(),
              hdrSize);
        }
      } else {
        // The onDiskBlock will become the headerAndDataBuffer for this
        // block. If nextBlockOnDiskSizeWithHeader is not zero, the
        // onDiskBlock already contains the next block's header, so this will
        // be exposed to the caller as well.
        b = new HFileBlock(ByteBuffer.wrap(onDiskBlock, 0,
            onDiskSizeWithHeader), getMinorVersion());
      }

      b.nextBlockOnDiskSizeWithHeader = nextBlockOnDiskSize;

      // Remember the next block's header so the next read at that offset
      // can avoid re-reading it.
      if (b.nextBlockOnDiskSizeWithHeader > 0) {
        prefetchedHeader.offset = offset + b.getOnDiskSizeWithHeader();
        System.arraycopy(onDiskBlock, onDiskSizeWithHeader,
            prefetchedHeader.header, 0, hdrSize);
      }

      b.includesMemstoreTS = includesMemstoreTS;
      b.offset = offset;
      return b;
    }

    void setIncludesMemstoreTS(boolean enabled) {
      includesMemstoreTS = enabled;
    }

    void setDataBlockEncoder(HFileDataBlockEncoder encoder) {
      this.dataBlockEncoder = encoder;
      encodedBlockDecodingCtx = encoder.newDataBlockDecodingContext(
          this.compressAlgo);
    }

    /**
     * Generates the checksum for the header as well as the data and then
     * validates that it matches the value stored in the header. If there is
     * no checksum data, this is a no-op that returns true.
     *
     * @return true if the checksum matches or checksums are disabled
     */
    protected boolean validateBlockChecksum(HFileBlock block,
        byte[] data, int hdrSize) throws IOException {
      return ChecksumUtil.validateBlockChecksum(path, block,
          data, hdrSize);
    }

    @Override
    public void closeStreams() throws IOException {
      streamWrapper.close();
    }
  }

  @Override
  public int getSerializedLength() {
    if (buf != null) {
      return this.buf.limit() + HFileBlock.EXTRA_SERIALIZATION_SPACE;
    }
    return 0;
  }

  @Override
  public void serialize(ByteBuffer destination) {
    ByteBuffer dupBuf = this.buf.duplicate();
    dupBuf.rewind();
    destination.put(dupBuf);
    serializeExtraInfo(destination);
  }

  public void serializeExtraInfo(ByteBuffer destination) {
    destination.putInt(this.minorVersion);
    destination.putLong(this.offset);
    destination.putInt(this.nextBlockOnDiskSizeWithHeader);
    destination.rewind();
  }

  @Override
  public CacheableDeserializer<Cacheable> getDeserializer() {
    return HFileBlock.blockDeserializer;
  }

  @Override
  public boolean equals(Object comparison) {
    if (this == comparison) {
      return true;
    }
    if (comparison == null) {
      return false;
    }
    if (comparison.getClass() != this.getClass()) {
      return false;
    }

    HFileBlock castedComparison = (HFileBlock) comparison;

    if (castedComparison.blockType != this.blockType) {
      return false;
    }
    if (castedComparison.nextBlockOnDiskSizeWithHeader != this.nextBlockOnDiskSizeWithHeader) {
      return false;
    }
    if (castedComparison.offset != this.offset) {
      return false;
    }
    if (castedComparison.onDiskSizeWithoutHeader != this.onDiskSizeWithoutHeader) {
      return false;
    }
    if (castedComparison.prevBlockOffset != this.prevBlockOffset) {
      return false;
    }
    if (castedComparison.uncompressedSizeWithoutHeader != this.uncompressedSizeWithoutHeader) {
      return false;
    }
    if (this.buf.compareTo(castedComparison.buf) != 0) {
      return false;
    }
    if (this.buf.position() != castedComparison.buf.position()) {
      return false;
    }
    if (this.buf.limit() != castedComparison.buf.limit()) {
      return false;
    }
    return true;
  }

  public boolean doesIncludeMemstoreTS() {
    return includesMemstoreTS;
  }

  public DataBlockEncoding getDataBlockEncoding() {
    if (blockType == BlockType.ENCODED_DATA) {
      return DataBlockEncoding.getEncodingById(getDataBlockEncodingId());
    }
    return DataBlockEncoding.NONE;
  }

  byte getChecksumType() {
    return this.checksumType;
  }

  int getBytesPerChecksum() {
    return this.bytesPerChecksum;
  }

  int getOnDiskDataSizeWithHeader() {
    return this.onDiskDataSizeWithHeader;
  }

  int getMinorVersion() {
    return this.minorVersion;
  }

  /**
   * Calculates the number of bytes required to store all the checksums for
   * this block. Each checksum value is a 4-byte integer.
   */
  int totalChecksumBytes() {
    // If the hfile block has minor version 0, then there is no checksum
    // data to validate. Similarly, a zero value in this.bytesPerChecksum
    // means that cached blocks do not carry checksum data, because
    // checksums were already validated when the block was read from disk.
    if (minorVersion < MINOR_VERSION_WITH_CHECKSUM || this.bytesPerChecksum == 0) {
      return 0;
    }
    return (int) ChecksumUtil.numBytes(onDiskDataSizeWithHeader, bytesPerChecksum);
  }

  /**
   * Returns the size of this block's header.
   */
  public int headerSize() {
    return headerSize(this.minorVersion);
  }

  /**
   * Maps a minor version to the size of the header.
   */
  public static int headerSize(int minorVersion) {
    if (minorVersion < MINOR_VERSION_WITH_CHECKSUM) {
      return HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM;
    }
    return HConstants.HFILEBLOCK_HEADER_SIZE;
  }

  /**
   * Returns the appropriate dummy header for the minor version of this
   * block.
   */
  public byte[] getDummyHeaderForVersion() {
    return getDummyHeaderForVersion(minorVersion);
  }

  /**
   * Returns the appropriate dummy header for the given minor version.
   */
  private static byte[] getDummyHeaderForVersion(int minorVersion) {
    if (minorVersion < MINOR_VERSION_WITH_CHECKSUM) {
      return DUMMY_HEADER_NO_CHECKSUM;
    }
    return HConstants.HFILEBLOCK_DUMMY_HEADER;
  }

  /**
   * Converts the contents of the block header into a human-readable string.
   * Mostly helpful for debugging. Assumes the header layout of minor version
   * {@link #MINOR_VERSION_WITH_CHECKSUM}.
   */
  static String toStringHeader(ByteBuffer buf) throws IOException {
    int offset = buf.arrayOffset();
    byte[] b = buf.array();
    long magic = Bytes.toLong(b, offset);
    BlockType bt = BlockType.read(buf);
    offset += Bytes.SIZEOF_LONG;
    int compressedBlockSizeNoHeader = Bytes.toInt(b, offset);
    offset += Bytes.SIZEOF_INT;
    int uncompressedBlockSizeNoHeader = Bytes.toInt(b, offset);
    offset += Bytes.SIZEOF_INT;
    long prevBlockOffset = Bytes.toLong(b, offset);
    offset += Bytes.SIZEOF_LONG;
    byte cksumtype = b[offset];
    offset += Bytes.SIZEOF_BYTE;
    long bytesPerChecksum = Bytes.toInt(b, offset);
    offset += Bytes.SIZEOF_INT;
    long onDiskDataSizeWithHeader = Bytes.toInt(b, offset);
    offset += Bytes.SIZEOF_INT;
    return " Header dump: magic: " + magic +
        " blockType " + bt +
        " compressedBlockSizeNoHeader " +
        compressedBlockSizeNoHeader +
        " uncompressedBlockSizeNoHeader " +
        uncompressedBlockSizeNoHeader +
        " prevBlockOffset " + prevBlockOffset +
        " checksumType " + ChecksumType.codeToType(cksumtype) +
        " bytesPerChecksum " + bytesPerChecksum +
        " onDiskDataSizeWithHeader " + onDiskDataSizeWithHeader;
  }
}