/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile;

import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultDecodingContext;
import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ChecksumType;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.hbase.util.CompoundBloomFilter;
import org.apache.hadoop.io.IOUtils;

import com.google.common.base.Preconditions;
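
/**
 * Reading and writing unit for {@link HFile} version 2 blocks. Each block
 * starts with a header consisting of a magic record identifying the
 * {@link BlockType}, the on-disk and uncompressed sizes of the block
 * (excluding the header), and the offset of the previous block of the same
 * type. Blocks of minor version {@link #MINOR_VERSION_WITH_CHECKSUM} or later
 * additionally store the checksum type, the number of bytes covered by each
 * checksum value, and the on-disk data size including the header, and are
 * followed by the checksum bytes themselves.
 */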
@InterfaceAudience.Private
public class HFileBlock implements Cacheable {

  static final int MINOR_VERSION_WITH_CHECKSUM = 1;

  static final int MINOR_VERSION_NO_CHECKSUM = 0;

  static final int CHECKSUM_VERIFICATION_NUM_IO_THRESHOLD = 3;

  public static final boolean FILL_HEADER = true;
  public static final boolean DONT_FILL_HEADER = false;

  public static final int ENCODED_HEADER_SIZE = HConstants.HFILEBLOCK_HEADER_SIZE
      + DataBlockEncoding.ID_SIZE;

  static final byte[] DUMMY_HEADER_NO_CHECKSUM =
      new byte[HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM];

  public static final int BYTE_BUFFER_HEAP_SIZE = (int) ClassSize.estimateBase(
      ByteBuffer.wrap(new byte[0], 0, 0).getClass(), false);
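
  /**
   * Extra bytes appended when serializing a block into the block cache: the
   * minor version (int), the file offset (long), and the on-disk size of the
   * next block including its header (int). See {@link #serialize(ByteBuffer)}.
   */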
  public static final int EXTRA_SERIALIZATION_SPACE = 2 * Bytes.SIZEOF_INT
      + Bytes.SIZEOF_LONG;

  static final int CHECKSUM_SIZE = Bytes.SIZEOF_INT;
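
  /**
   * Deserializer used by the block cache to reconstruct an {@link HFileBlock}
   * from a serialized buffer, including the extra serialization fields
   * appended by {@link #serialize(ByteBuffer)}.
   */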
  private static final CacheableDeserializer<Cacheable> blockDeserializer =
      new CacheableDeserializer<Cacheable>() {
        public HFileBlock deserialize(ByteBuffer buf, boolean reuse) throws IOException {
          buf.limit(buf.limit() - HFileBlock.EXTRA_SERIALIZATION_SPACE).rewind();
          ByteBuffer newByteBuffer;
          if (reuse) {
            newByteBuffer = buf.slice();
          } else {
            newByteBuffer = ByteBuffer.allocate(buf.limit());
            newByteBuffer.put(buf);
          }
          buf.position(buf.limit());
          buf.limit(buf.limit() + HFileBlock.EXTRA_SERIALIZATION_SPACE);
          int minorVersion = buf.getInt();
          HFileBlock ourBuffer = new HFileBlock(newByteBuffer, minorVersion);
          ourBuffer.offset = buf.getLong();
          ourBuffer.nextBlockOnDiskSizeWithHeader = buf.getInt();
          return ourBuffer;
        }

        @Override
        public int getDeserialiserIdentifier() {
          return deserializerIdentifier;
        }

        @Override
        public HFileBlock deserialize(ByteBuffer b) throws IOException {
          return deserialize(b, false);
        }
      };

  private static final int deserializerIdentifier;
  static {
    deserializerIdentifier = CacheableDeserializerIdManager
        .registerDeserializer(blockDeserializer);
  }

  private BlockType blockType;

  private int onDiskSizeWithoutHeader;

  private final int uncompressedSizeWithoutHeader;

  private final long prevBlockOffset;

  private final byte checksumType;

  private final int bytesPerChecksum;

  private final int onDiskDataSizeWithHeader;

  private final int minorVersion;

  private ByteBuffer buf;

  private boolean includesMemstoreTS;

  private long offset = -1;

  private int nextBlockOnDiskSizeWithHeader = -1;
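
  /**
   * Creates a new {@link HFile} block from the given fields. Used when the
   * block data has already been read or generated and is backed by the given
   * buffer.
   *
   * @param fillHeader when true, overwrites the first {@link #headerSize()}
   *          bytes of the buffer with the header fields passed in
   */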
  HFileBlock(BlockType blockType, int onDiskSizeWithoutHeader,
      int uncompressedSizeWithoutHeader, long prevBlockOffset, ByteBuffer buf,
      boolean fillHeader, long offset, boolean includesMemstoreTS,
      int minorVersion, int bytesPerChecksum, byte checksumType,
      int onDiskDataSizeWithHeader) {
    this.blockType = blockType;
    this.onDiskSizeWithoutHeader = onDiskSizeWithoutHeader;
    this.uncompressedSizeWithoutHeader = uncompressedSizeWithoutHeader;
    this.prevBlockOffset = prevBlockOffset;
    this.buf = buf;
    if (fillHeader) {
      overwriteHeader();
    }
    this.offset = offset;
    this.includesMemstoreTS = includesMemstoreTS;
    this.minorVersion = minorVersion;
    this.bytesPerChecksum = bytesPerChecksum;
    this.checksumType = checksumType;
    this.onDiskDataSizeWithHeader = onDiskDataSizeWithHeader;
  }
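
  /**
   * Creates a block from an existing buffer starting with the block header.
   * The header fields are parsed from the buffer; for blocks older than
   * {@link #MINOR_VERSION_WITH_CHECKSUM} the checksum fields are defaulted to
   * "no checksum".
   */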
  HFileBlock(ByteBuffer b, int minorVersion) throws IOException {
    b.rewind();
    blockType = BlockType.read(b);
    onDiskSizeWithoutHeader = b.getInt();
    uncompressedSizeWithoutHeader = b.getInt();
    prevBlockOffset = b.getLong();
    this.minorVersion = minorVersion;
    if (minorVersion >= MINOR_VERSION_WITH_CHECKSUM) {
      this.checksumType = b.get();
      this.bytesPerChecksum = b.getInt();
      this.onDiskDataSizeWithHeader = b.getInt();
    } else {
      this.checksumType = ChecksumType.NULL.getCode();
      this.bytesPerChecksum = 0;
      this.onDiskDataSizeWithHeader = onDiskSizeWithoutHeader +
          HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM;
    }
    buf = b;
    buf.rewind();
  }

  public BlockType getBlockType() {
    return blockType;
  }

  public short getDataBlockEncodingId() {
    if (blockType != BlockType.ENCODED_DATA) {
      throw new IllegalArgumentException("Querying encoder ID of a block " +
          "of type other than " + BlockType.ENCODED_DATA + ": " + blockType);
    }
    return buf.getShort(headerSize());
  }

  public int getOnDiskSizeWithHeader() {
    return onDiskSizeWithoutHeader + headerSize();
  }

  public int getOnDiskSizeWithoutHeader() {
    return onDiskSizeWithoutHeader;
  }

  public int getUncompressedSizeWithoutHeader() {
    return uncompressedSizeWithoutHeader;
  }

  public long getPrevBlockOffset() {
    return prevBlockOffset;
  }

  private void overwriteHeader() {
    buf.rewind();
    blockType.write(buf);
    buf.putInt(onDiskSizeWithoutHeader);
    buf.putInt(uncompressedSizeWithoutHeader);
    buf.putLong(prevBlockOffset);
  }
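
  /**
   * Returns the buffer this block stores internally, with the header and
   * checksum bytes excluded. The returned buffer shares the backing array
   * with this block, so callers should treat it as read-only.
   */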
  public ByteBuffer getBufferWithoutHeader() {
    return ByteBuffer.wrap(buf.array(), buf.arrayOffset() + headerSize(),
        buf.limit() - headerSize() - totalChecksumBytes()).slice();
  }

  public ByteBuffer getBufferReadOnly() {
    return ByteBuffer.wrap(buf.array(), buf.arrayOffset(),
        buf.limit() - totalChecksumBytes()).slice();
  }

  public ByteBuffer getBufferReadOnlyWithHeader() {
    return ByteBuffer.wrap(buf.array(), buf.arrayOffset(), buf.limit()).slice();
  }

  ByteBuffer getBufferWithHeader() {
    ByteBuffer dupBuf = buf.duplicate();
    dupBuf.rewind();
    return dupBuf;
  }

  private void sanityCheckAssertion(long valueFromBuf, long valueFromField,
      String fieldName) throws IOException {
    if (valueFromBuf != valueFromField) {
      throw new AssertionError(fieldName + " in the buffer (" + valueFromBuf
          + ") is different from that in the field (" + valueFromField + ")");
    }
  }

  void sanityCheck() throws IOException {
    buf.rewind();

    {
      BlockType blockTypeFromBuf = BlockType.read(buf);
      if (blockTypeFromBuf != blockType) {
        throw new IOException("Block type stored in the buffer: " +
            blockTypeFromBuf + ", block type field: " + blockType);
      }
    }

    sanityCheckAssertion(buf.getInt(), onDiskSizeWithoutHeader,
        "onDiskSizeWithoutHeader");

    sanityCheckAssertion(buf.getInt(), uncompressedSizeWithoutHeader,
        "uncompressedSizeWithoutHeader");

    sanityCheckAssertion(buf.getLong(), prevBlockOffset, "prevBlockOffset");
    if (minorVersion >= MINOR_VERSION_WITH_CHECKSUM) {
      sanityCheckAssertion(buf.get(), checksumType, "checksumType");
      sanityCheckAssertion(buf.getInt(), bytesPerChecksum, "bytesPerChecksum");
      sanityCheckAssertion(buf.getInt(), onDiskDataSizeWithHeader,
          "onDiskDataSizeWithHeader");
    }

    int cksumBytes = totalChecksumBytes();
    int hdrSize = headerSize();
    int expectedBufLimit = uncompressedSizeWithoutHeader + hdrSize +
        cksumBytes;
    if (buf.limit() != expectedBufLimit) {
      throw new AssertionError("Expected buffer limit " + expectedBufLimit
          + ", got " + buf.limit());
    }

    int size = uncompressedSizeWithoutHeader + hdrSize + cksumBytes;
    if (buf.capacity() != size &&
        buf.capacity() != size + hdrSize) {
      throw new AssertionError("Invalid buffer capacity: " + buf.capacity() +
          ", expected " + size + " or " + (size + hdrSize));
    }
  }

  @Override
  public String toString() {
    return "blockType="
        + blockType
        + ", onDiskSizeWithoutHeader="
        + onDiskSizeWithoutHeader
        + ", uncompressedSizeWithoutHeader="
        + uncompressedSizeWithoutHeader
        + ", prevBlockOffset="
        + prevBlockOffset
        + ", dataBeginsWith="
        + Bytes.toStringBinary(buf.array(), buf.arrayOffset() + headerSize(),
            Math.min(32, buf.limit() - buf.arrayOffset() - headerSize()))
        + ", fileOffset=" + offset;
  }

  private void validateOnDiskSizeWithoutHeader(
      int expectedOnDiskSizeWithoutHeader) throws IOException {
    if (onDiskSizeWithoutHeader != expectedOnDiskSizeWithoutHeader) {
      String blockInfoMsg =
          "Block offset: " + offset + ", data starts with: "
              + Bytes.toStringBinary(buf.array(), buf.arrayOffset(),
                  buf.arrayOffset() + Math.min(32, buf.limit()));
      throw new IOException("On-disk size without header provided is "
          + expectedOnDiskSizeWithoutHeader + ", but block "
          + "header contains " + onDiskSizeWithoutHeader + ". " +
          blockInfoMsg);
    }
  }
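
  /**
   * Allocates a new buffer big enough to hold the uncompressed block data
   * plus header and checksums, copying the existing header into it. Used
   * before decompressing a block in place.
   *
   * @param extraBytes whether to also reserve room for the next block's
   *          header, read as part of a look-ahead
   */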
  private void allocateBuffer(boolean extraBytes) {
    int cksumBytes = totalChecksumBytes();
    int capacityNeeded = headerSize() + uncompressedSizeWithoutHeader +
        cksumBytes +
        (extraBytes ? headerSize() : 0);

    ByteBuffer newBuf = ByteBuffer.allocate(capacityNeeded);

    System.arraycopy(buf.array(), buf.arrayOffset(), newBuf.array(),
        newBuf.arrayOffset(), headerSize());

    buf = newBuf;
    buf.limit(headerSize() + uncompressedSizeWithoutHeader + cksumBytes);
  }

  public void assumeUncompressed() throws IOException {
    if (onDiskSizeWithoutHeader != uncompressedSizeWithoutHeader +
        totalChecksumBytes()) {
      throw new IOException("Using no compression but "
          + "onDiskSizeWithoutHeader=" + onDiskSizeWithoutHeader + ", "
          + "uncompressedSizeWithoutHeader=" + uncompressedSizeWithoutHeader
          + ", numChecksumBytes=" + totalChecksumBytes());
    }
  }

  public void expectType(BlockType expectedType) throws IOException {
    if (blockType != expectedType) {
      throw new IOException("Invalid block type: expected=" + expectedType
          + ", actual=" + blockType);
    }
  }

  public long getOffset() {
    if (offset < 0) {
      throw new IllegalStateException(
          "HFile block offset not initialized properly");
    }
    return offset;
  }

  public DataInputStream getByteStream() {
    return new DataInputStream(new ByteArrayInputStream(buf.array(),
        buf.arrayOffset() + headerSize(), buf.limit() - headerSize()));
  }

  @Override
  public long heapSize() {
    long size = ClassSize.align(
        ClassSize.OBJECT +
        // block type and byte buffer references
        2 * ClassSize.REFERENCE +
        // on-disk size, uncompressed size, bytes per checksum,
        // on-disk data size, minor version, next block's on-disk size
        6 * Bytes.SIZEOF_INT +
        // checksum type
        1 * Bytes.SIZEOF_BYTE +
        // previous block offset and this block's offset
        2 * Bytes.SIZEOF_LONG +
        // includesMemstoreTS
        Bytes.SIZEOF_BOOLEAN
    );

    if (buf != null) {
      size += ClassSize.align(buf.capacity() + BYTE_BUFFER_HEAP_SIZE);
    }

    return ClassSize.align(size);
  }
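
  /**
   * Reads the "necessary" number of bytes, failing on a premature end of
   * stream, and then opportunistically reads up to {@code extraLen}
   * additional bytes.
   *
   * @return true if all the necessary and extra bytes were read, false if
   *         only the necessary bytes were read before the stream ended
   */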
  public static boolean readWithExtra(InputStream in, byte[] buf,
      int bufOffset, int necessaryLen, int extraLen) throws IOException {
    int bytesRemaining = necessaryLen + extraLen;
    while (bytesRemaining > 0) {
      int ret = in.read(buf, bufOffset, bytesRemaining);
      if (ret == -1 && bytesRemaining <= extraLen) {
        // We could not read the "extra" bytes, but that is OK.
        break;
      }

      if (ret < 0) {
        throw new IOException("Premature EOF from inputStream (read "
            + "returned " + ret + ", was trying to read " + necessaryLen
            + " necessary bytes and " + extraLen + " extra bytes, "
            + "successfully read "
            + (necessaryLen + extraLen - bytesRemaining) + ")");
      }
      bufOffset += ret;
      bytesRemaining -= ret;
    }
    return bytesRemaining <= 0;
  }

  public int getNextBlockOnDiskSizeWithHeader() {
    return nextBlockOnDiskSizeWithHeader;
  }
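
  /**
   * Accumulates a block of data in memory, then encodes, compresses and
   * checksums it when the block is finished. The writer cycles through the
   * INIT, WRITING and BLOCK_READY states as blocks are started, filled and
   * flushed to an output stream.
   */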
  public static class Writer {

    private enum State {
      INIT,
      WRITING,
      BLOCK_READY
    }

    private State state = State.INIT;

    private final HFileDataBlockEncoder dataBlockEncoder;

    private HFileBlockEncodingContext dataBlockEncodingCtx;

    private HFileBlockDefaultEncodingContext defaultBlockEncodingCtx;

    private ByteArrayOutputStream baosInMemory;

    private BlockType blockType;

    private DataOutputStream userDataStream;

    private byte[] onDiskBytesWithHeader;

    private byte[] onDiskChecksum;

    private byte[] uncompressedBytesWithHeader;

    private long startOffset;

    private long[] prevOffsetByType;

    private long prevOffset;

    private boolean includesMemstoreTS;

    private ChecksumType checksumType;
    private int bytesPerChecksum;
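
    /**
     * @param compressionAlgorithm compression algorithm to use
     * @param dataBlockEncoder data block encoder, or null for no encoding
     * @param checksumType type of checksum to generate
     * @param bytesPerChecksum number of bytes covered by each checksum value
     */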
    public Writer(Compression.Algorithm compressionAlgorithm,
        HFileDataBlockEncoder dataBlockEncoder, boolean includesMemstoreTS,
        ChecksumType checksumType, int bytesPerChecksum) {
      this.dataBlockEncoder = dataBlockEncoder != null
          ? dataBlockEncoder : NoOpDataBlockEncoder.INSTANCE;
      defaultBlockEncodingCtx = new HFileBlockDefaultEncodingContext(
          compressionAlgorithm, null, HConstants.HFILEBLOCK_DUMMY_HEADER);
      dataBlockEncodingCtx =
          this.dataBlockEncoder.newOnDiskDataBlockEncodingContext(
              compressionAlgorithm, HConstants.HFILEBLOCK_DUMMY_HEADER);

      if (bytesPerChecksum < HConstants.HFILEBLOCK_HEADER_SIZE) {
        throw new RuntimeException("Unsupported value of bytesPerChecksum. " +
            "Minimum is " + HConstants.HFILEBLOCK_HEADER_SIZE +
            " but the configured value is " + bytesPerChecksum);
      }

      baosInMemory = new ByteArrayOutputStream();

      prevOffsetByType = new long[BlockType.values().length];
      for (int i = 0; i < prevOffsetByType.length; ++i) {
        prevOffsetByType[i] = -1;
      }

      this.includesMemstoreTS = includesMemstoreTS;
      this.checksumType = checksumType;
      this.bytesPerChecksum = bytesPerChecksum;
    }
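
    /**
     * Starts writing a new block of the given type, remembering the previous
     * block's offset for the prevBlockOffset header field.
     *
     * @return a stream the unencoded, uncompressed block contents should be
     *         written to
     */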
    public DataOutputStream startWriting(BlockType newBlockType)
        throws IOException {
      if (state == State.BLOCK_READY && startOffset != -1) {
        // The previous block was written to a stream at a specific offset;
        // save that offset as the last offset of a block of that type.
        prevOffsetByType[blockType.getId()] = startOffset;
      }

      startOffset = -1;
      blockType = newBlockType;

      baosInMemory.reset();
      baosInMemory.write(HConstants.HFILEBLOCK_DUMMY_HEADER);

      state = State.WRITING;

      userDataStream = new DataOutputStream(baosInMemory);
      return userDataStream;
    }

    DataOutputStream getUserDataStream() {
      expectState(State.WRITING);
      return userDataStream;
    }

    private void ensureBlockReady() throws IOException {
      Preconditions.checkState(state != State.INIT,
          "Unexpected state: " + state);

      if (state == State.BLOCK_READY) {
        return;
      }

      finishBlock();
    }
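
    /**
     * Finishes the in-memory block: encodes data blocks, compresses, fills in
     * the header of both the on-disk and uncompressed byte arrays, and
     * generates the checksum bytes. Transitions the writer to BLOCK_READY.
     */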
    private void finishBlock() throws IOException {
      userDataStream.flush();

      uncompressedBytesWithHeader = baosInMemory.toByteArray();
      prevOffset = prevOffsetByType[blockType.getId()];

      state = State.BLOCK_READY;
      if (blockType == BlockType.DATA) {
        encodeDataBlockForDisk();
      } else {
        defaultBlockEncodingCtx.compressAfterEncoding(
            uncompressedBytesWithHeader, blockType);
        onDiskBytesWithHeader =
            defaultBlockEncodingCtx.getOnDiskBytesWithHeader();
      }

      int numBytes = (int) ChecksumUtil.numBytes(
          onDiskBytesWithHeader.length,
          bytesPerChecksum);

      putHeader(onDiskBytesWithHeader, 0,
          onDiskBytesWithHeader.length + numBytes,
          uncompressedBytesWithHeader.length, onDiskBytesWithHeader.length);

      putHeader(uncompressedBytesWithHeader, 0,
          onDiskBytesWithHeader.length + numBytes,
          uncompressedBytesWithHeader.length, onDiskBytesWithHeader.length);

      onDiskChecksum = new byte[numBytes];
      ChecksumUtil.generateChecksums(
          onDiskBytesWithHeader, 0, onDiskBytesWithHeader.length,
          onDiskChecksum, 0, checksumType, bytesPerChecksum);
    }

    private void encodeDataBlockForDisk() throws IOException {
      ByteBuffer rawKeyValues =
          ByteBuffer.wrap(uncompressedBytesWithHeader,
              HConstants.HFILEBLOCK_HEADER_SIZE,
              uncompressedBytesWithHeader.length
                  - HConstants.HFILEBLOCK_HEADER_SIZE).slice();

      dataBlockEncoder.beforeWriteToDisk(rawKeyValues,
          includesMemstoreTS, dataBlockEncodingCtx, blockType);

      uncompressedBytesWithHeader =
          dataBlockEncodingCtx.getUncompressedBytesWithHeader();
      onDiskBytesWithHeader =
          dataBlockEncodingCtx.getOnDiskBytesWithHeader();
      blockType = dataBlockEncodingCtx.getBlockType();
    }

    private void putHeader(byte[] dest, int offset, int onDiskSize,
        int uncompressedSize, int onDiskDataSize) {
      offset = blockType.put(dest, offset);
      offset = Bytes.putInt(dest, offset,
          onDiskSize - HConstants.HFILEBLOCK_HEADER_SIZE);
      offset = Bytes.putInt(dest, offset,
          uncompressedSize - HConstants.HFILEBLOCK_HEADER_SIZE);
      offset = Bytes.putLong(dest, offset, prevOffset);
      offset = Bytes.putByte(dest, offset, checksumType.getCode());
      offset = Bytes.putInt(dest, offset, bytesPerChecksum);
      Bytes.putInt(dest, offset, onDiskDataSize);
    }

    public void writeHeaderAndData(FSDataOutputStream out) throws IOException {
      long offset = out.getPos();
      if (startOffset != -1 && offset != startOffset) {
        throw new IOException("A " + blockType + " block written to a "
            + "stream twice, first at offset " + startOffset + ", then at "
            + offset);
      }
      startOffset = offset;

      finishBlockAndWriteHeaderAndData((DataOutputStream) out);
    }

    private void finishBlockAndWriteHeaderAndData(DataOutputStream out)
        throws IOException {
      ensureBlockReady();
      out.write(onDiskBytesWithHeader);
      out.write(onDiskChecksum);
    }

    byte[] getHeaderAndDataForTest() throws IOException {
      ensureBlockReady();

      byte[] output =
          new byte[onDiskBytesWithHeader.length
              + onDiskChecksum.length];
      System.arraycopy(onDiskBytesWithHeader, 0, output, 0,
          onDiskBytesWithHeader.length);
      System.arraycopy(onDiskChecksum, 0, output,
          onDiskBytesWithHeader.length, onDiskChecksum.length);
      return output;
    }

    public void release() {
      if (dataBlockEncodingCtx != null) {
        dataBlockEncodingCtx.close();
        dataBlockEncodingCtx = null;
      }
      if (defaultBlockEncodingCtx != null) {
        defaultBlockEncodingCtx.close();
        defaultBlockEncodingCtx = null;
      }
    }

    int getOnDiskSizeWithoutHeader() {
      expectState(State.BLOCK_READY);
      return onDiskBytesWithHeader.length + onDiskChecksum.length
          - HConstants.HFILEBLOCK_HEADER_SIZE;
    }

    int getOnDiskSizeWithHeader() {
      expectState(State.BLOCK_READY);
      return onDiskBytesWithHeader.length + onDiskChecksum.length;
    }

    int getUncompressedSizeWithoutHeader() {
      expectState(State.BLOCK_READY);
      return uncompressedBytesWithHeader.length
          - HConstants.HFILEBLOCK_HEADER_SIZE;
    }

    int getUncompressedSizeWithHeader() {
      expectState(State.BLOCK_READY);
      return uncompressedBytesWithHeader.length;
    }

    public boolean isWriting() {
      return state == State.WRITING;
    }

    public int blockSizeWritten() {
      if (state != State.WRITING) {
        return 0;
      }
      return userDataStream.size();
    }

    ByteBuffer getUncompressedBufferWithHeader() {
      expectState(State.BLOCK_READY);
      return ByteBuffer.wrap(uncompressedBytesWithHeader);
    }

    private void expectState(State expectedState) {
      if (state != expectedState) {
        throw new IllegalStateException("Expected state: " + expectedState +
            ", actual state: " + state);
      }
    }

    public void writeBlock(BlockWritable bw, FSDataOutputStream out)
        throws IOException {
      bw.writeToBlock(startWriting(bw.getBlockType()));
      writeHeaderAndData(out);
    }
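
    /**
     * Creates an in-memory copy of this block suitable for caching. The
     * cached block stores uncompressed data only, so checksums are omitted
     * (ChecksumType.NULL with zero bytes per checksum).
     */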
    public HFileBlock getBlockForCaching() {
      return new HFileBlock(blockType, getOnDiskSizeWithoutHeader(),
          getUncompressedSizeWithoutHeader(), prevOffset,
          getUncompressedBufferWithHeader(), DONT_FILL_HEADER, startOffset,
          includesMemstoreTS, MINOR_VERSION_WITH_CHECKSUM,
          0, ChecksumType.NULL.getCode(),
          onDiskBytesWithHeader.length + onDiskChecksum.length);
    }
  }

  public interface BlockWritable {

    BlockType getBlockType();

    void writeToBlock(DataOutput out) throws IOException;
  }

  public interface BlockIterator {

    HFileBlock nextBlock() throws IOException;

    HFileBlock nextBlockWithBlockType(BlockType blockType) throws IOException;
  }

  public interface FSReader {
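
    /**
     * Reads the block at the given offset in the file.
     *
     * @param offset offset of the block in the file
     * @param onDiskSize the on-disk size of the block including the header,
     *          or -1 if unknown
     * @param uncompressedSize expected to be -1 for the version 2 reader
     * @param pread whether to use a positional read instead of seek+read
     */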
    HFileBlock readBlockData(long offset, long onDiskSize,
        int uncompressedSize, boolean pread) throws IOException;

    BlockIterator blockRange(long startOffset, long endOffset);
  }

  private abstract static class AbstractFSReader implements FSReader {

    protected final FSDataInputStream istream;

    protected final FSDataInputStream istreamNoFsChecksum;

    protected Compression.Algorithm compressAlgo;

    protected long fileSize;

    private int minorVersion;

    protected final int hdrSize;

    protected HFileSystem hfs;

    protected Path path;

    private final Lock streamLock = new ReentrantLock();

    public static final int DEFAULT_BUFFER_SIZE = 1 << 20;

    public AbstractFSReader(FSDataInputStream istream,
        FSDataInputStream istreamNoFsChecksum,
        Algorithm compressAlgo,
        long fileSize, int minorVersion, HFileSystem hfs, Path path)
        throws IOException {
      this.istream = istream;
      this.compressAlgo = compressAlgo;
      this.fileSize = fileSize;
      this.minorVersion = minorVersion;
      this.hfs = hfs;
      this.path = path;
      this.hdrSize = headerSize(minorVersion);
      this.istreamNoFsChecksum = istreamNoFsChecksum;
    }

    @Override
    public BlockIterator blockRange(final long startOffset,
        final long endOffset) {
      return new BlockIterator() {
        private long offset = startOffset;

        @Override
        public HFileBlock nextBlock() throws IOException {
          if (offset >= endOffset) {
            return null;
          }
          HFileBlock b = readBlockData(offset, -1, -1, false);
          offset += b.getOnDiskSizeWithHeader();
          return b;
        }

        @Override
        public HFileBlock nextBlockWithBlockType(BlockType blockType)
            throws IOException {
          HFileBlock blk = nextBlock();
          if (blk.getBlockType() != blockType) {
            throw new IOException("Expected block of type " + blockType
                + " but found " + blk.getBlockType());
          }
          return blk;
        }
      };
    }
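
    /**
     * Reads {@code size} bytes at {@code fileOffset} into {@code dest},
     * optionally reading the next block's header as well. Uses seek+read
     * under a stream lock when {@code pread} is false and the lock is free,
     * falling back to a positional read otherwise.
     *
     * @return the on-disk size of the next block including its header, or -1
     *         if it could not be determined
     */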
    protected int readAtOffset(FSDataInputStream istream,
        byte[] dest, int destOffset, int size,
        boolean peekIntoNextBlock, long fileOffset, boolean pread)
        throws IOException {
      if (peekIntoNextBlock &&
          destOffset + size + hdrSize > dest.length) {
        // We are asked to read the next block's header as well, but there is
        // not enough room in the destination array.
        throw new IOException("Attempted to read " + size + " bytes and " +
            hdrSize + " bytes of next header into a " + dest.length +
            "-byte array at offset " + destOffset);
      }

      if (!pread && streamLock.tryLock()) {
        // Seek + read.
        try {
          istream.seek(fileOffset);

          long realOffset = istream.getPos();
          if (realOffset != fileOffset) {
            throw new IOException("Tried to seek to " + fileOffset + " to "
                + "read " + size + " bytes, but pos=" + realOffset
                + " after seek");
          }

          if (!peekIntoNextBlock) {
            IOUtils.readFully(istream, dest, destOffset, size);
            return -1;
          }

          // Try to read the next block's header as well.
          if (!readWithExtra(istream, dest, destOffset, size, hdrSize)) {
            return -1;
          }
        } finally {
          streamLock.unlock();
        }
      } else {
        // Positional read; also used when the stream lock is contended.
        int extraSize = peekIntoNextBlock ? hdrSize : 0;

        int ret = istream.read(fileOffset, dest, destOffset, size + extraSize);
        if (ret < size) {
          throw new IOException("Positional read of " + size + " bytes " +
              "failed at offset " + fileOffset + " (returned " + ret + ")");
        }

        if (ret == size || ret < size + extraSize) {
          // Could not read the next block's header.
          return -1;
        }
      }

      assert peekIntoNextBlock;
      return Bytes.toInt(dest, destOffset + size + BlockType.MAGIC_LENGTH) +
          hdrSize;
    }

    protected InputStream createBufferedBoundedStream(long offset,
        int size, boolean pread) {
      return new BufferedInputStream(new BoundedRangeFileInputStream(istream,
          offset, size, pread), Math.min(DEFAULT_BUFFER_SIZE, size));
    }

    protected int getMinorVersion() {
      return minorVersion;
    }
  }
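
  /**
   * Header of the next block, read as a look-ahead and cached per thread so
   * the follow-up read of that block does not have to re-read its header.
   */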
  private static class PrefetchedHeader {
    long offset = -1;
    byte[] header = new byte[HConstants.HFILEBLOCK_HEADER_SIZE];
    ByteBuffer buf = ByteBuffer.wrap(header, 0, HConstants.HFILEBLOCK_HEADER_SIZE);
  }

  static class FSReaderV2 extends AbstractFSReader {

    private final boolean useHBaseChecksumConfigured;

    private volatile boolean useHBaseChecksum;

    private volatile int checksumOffCount = -1;

    protected boolean includesMemstoreTS;

    protected HFileDataBlockEncoder dataBlockEncoder =
        NoOpDataBlockEncoder.INSTANCE;

    private HFileBlockDecodingContext encodedBlockDecodingCtx;

    private HFileBlockDefaultDecodingContext defaultDecodingCtx;

    private ThreadLocal<PrefetchedHeader> prefetchedHeaderForThread =
        new ThreadLocal<PrefetchedHeader>() {
          @Override
          public PrefetchedHeader initialValue() {
            return new PrefetchedHeader();
          }
        };

    public FSReaderV2(FSDataInputStream istream,
        FSDataInputStream istreamNoFsChecksum, Algorithm compressAlgo,
        long fileSize, int minorVersion, HFileSystem hfs, Path path)
        throws IOException {
      super(istream, istreamNoFsChecksum, compressAlgo, fileSize,
          minorVersion, hfs, path);

      if (hfs != null) {
        // Check the filesystem to see whether HBase-level checksums are
        // switched on.
        useHBaseChecksum = hfs.useHBaseChecksum();
      } else {
        // No filesystem was provided, so default to HBase-level checksums.
        useHBaseChecksum = true;
      }

      // Older block format versions do not carry checksums.
      if (getMinorVersion() < MINOR_VERSION_WITH_CHECKSUM) {
        useHBaseChecksum = false;
      }
      this.useHBaseChecksumConfigured = useHBaseChecksum;
      defaultDecodingCtx =
          new HFileBlockDefaultDecodingContext(compressAlgo);
      encodedBlockDecodingCtx =
          new HFileBlockDefaultDecodingContext(compressAlgo);
    }

    FSReaderV2(FSDataInputStream istream, Algorithm compressAlgo,
        long fileSize) throws IOException {
      this(istream, istream, compressAlgo, fileSize,
          HFileReaderV2.MAX_MINOR_VERSION, null, null);
    }
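
    /**
     * Reads a version 2 block. If HBase-level checksum verification fails,
     * retries the read using HDFS checksums, and temporarily switches
     * subsequent reads to HDFS checksums as well.
     */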
    @Override
    public HFileBlock readBlockData(long offset, long onDiskSizeWithHeaderL,
        int uncompressedSize, boolean pread) throws IOException {
      // Prefer the stream that skips HDFS-level checksums, since we verify
      // HBase-level checksums ourselves when they are enabled.
      FSDataInputStream is = this.istreamNoFsChecksum;

      boolean doVerificationThruHBaseChecksum = this.useHBaseChecksum;
      if (!doVerificationThruHBaseChecksum) {
        is = this.istream;
      }

      HFileBlock blk = readBlockDataInternal(is, offset,
          onDiskSizeWithHeaderL,
          uncompressedSize, pread,
          doVerificationThruHBaseChecksum);
      if (blk == null) {
        HFile.LOG.warn("HBase checksum verification failed for file " +
            path + " at offset " +
            offset + " filesize " + fileSize +
            ". Retrying read with HDFS checksums turned on...");

        if (!doVerificationThruHBaseChecksum) {
          String msg = "HBase checksum verification failed for file " +
              path + " at offset " +
              offset + " filesize " + fileSize +
              " but this cannot happen because doVerify is " +
              doVerificationThruHBaseChecksum;
          HFile.LOG.warn(msg);
          throw new IOException(msg);
        }
        HFile.checksumFailures.incrementAndGet();

        // Switch back to HDFS checksums for a while and retry the read.
        this.checksumOffCount = CHECKSUM_VERIFICATION_NUM_IO_THRESHOLD;
        this.useHBaseChecksum = false;
        doVerificationThruHBaseChecksum = false;
        is = this.istream;
        blk = readBlockDataInternal(is, offset, onDiskSizeWithHeaderL,
            uncompressedSize, pread,
            doVerificationThruHBaseChecksum);
        if (blk != null) {
          HFile.LOG.warn("HDFS checksum verification succeeded for file " +
              path + " at offset " +
              offset + " filesize " + fileSize);
        }
      }
      if (blk == null && !doVerificationThruHBaseChecksum) {
        String msg = "readBlockData failed, possibly due to a "
            + "checksum verification failure, for file " + path
            + " at offset " + offset + " filesize " + fileSize;
        HFile.LOG.warn(msg);
        throw new IOException(msg);
      }

      // If HBase-level checksums were configured but are temporarily switched
      // off, re-enable them once enough reads have gone through cleanly.
      if (!this.useHBaseChecksum && this.useHBaseChecksumConfigured) {
        if (this.checksumOffCount-- < 0) {
          this.useHBaseChecksum = true;
        }
      }
      return blk;
    }
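
    /**
     * Reads a version 2 block, either using the provided on-disk size (the
     * header is read as part of the block data) or by first reading the
     * header to discover the block's size. Returns null if
     * {@code verifyChecksum} is set and the HBase-level checksum does not
     * match.
     */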
    private HFileBlock readBlockDataInternal(FSDataInputStream is, long offset,
        long onDiskSizeWithHeaderL, int uncompressedSize, boolean pread,
        boolean verifyChecksum) throws IOException {
      if (offset < 0) {
        throw new IOException("Invalid offset=" + offset + " trying to read "
            + "block (onDiskSize=" + onDiskSizeWithHeaderL
            + ", uncompressedSize=" + uncompressedSize + ")");
      }
      if (uncompressedSize != -1) {
        throw new IOException("Version 2 block reader API does not need " +
            "the uncompressed size parameter");
      }

      if ((onDiskSizeWithHeaderL < hdrSize && onDiskSizeWithHeaderL != -1)
          || onDiskSizeWithHeaderL >= Integer.MAX_VALUE) {
        throw new IOException("Invalid onDisksize=" + onDiskSizeWithHeaderL
            + ": expected to be at least " + hdrSize
            + " and at most " + Integer.MAX_VALUE + ", or -1 (offset="
            + offset + ", uncompressedSize=" + uncompressedSize + ")");
      }

      int onDiskSizeWithHeader = (int) onDiskSizeWithHeaderL;

      // Check whether this thread already read this block's header as part of
      // a look-ahead on the previous block.
      PrefetchedHeader prefetchedHeader = prefetchedHeaderForThread.get();
      ByteBuffer headerBuf = prefetchedHeader.offset == offset ?
          prefetchedHeader.buf : null;

      int nextBlockOnDiskSize = 0;

      byte[] onDiskBlock = null;

      HFileBlock b = null;
      if (onDiskSizeWithHeader > 0) {
        // We know the total on-disk size, so the whole block can be read in
        // one go, skipping the header if it was already prefetched.
        int preReadHeaderSize = headerBuf == null ? 0 : hdrSize;
        onDiskBlock = new byte[onDiskSizeWithHeader + hdrSize];
        nextBlockOnDiskSize = readAtOffset(is, onDiskBlock,
            preReadHeaderSize, onDiskSizeWithHeader - preReadHeaderSize,
            true, offset + preReadHeaderSize, pread);
        if (headerBuf != null) {
          // Copy the prefetched header into the block's byte array.
          System.arraycopy(headerBuf.array(),
              headerBuf.arrayOffset(), onDiskBlock, 0, hdrSize);
        } else {
          headerBuf = ByteBuffer.wrap(onDiskBlock, 0, hdrSize);
        }

        try {
          b = new HFileBlock(headerBuf, getMinorVersion());
        } catch (IOException ex) {
          throw new IOException("Failed to read compressed block at "
              + offset
              + ", onDiskSizeWithHeader="
              + onDiskSizeWithHeader
              + ", preReadHeaderSize="
              + hdrSize
              + ", header.length="
              + prefetchedHeader.header.length
              + ", header bytes: "
              + Bytes.toStringBinary(prefetchedHeader.header, 0,
                  hdrSize), ex);
        }

        int onDiskSizeWithoutHeader = onDiskSizeWithHeader - hdrSize;
        assert onDiskSizeWithoutHeader >= 0;
        b.validateOnDiskSizeWithoutHeader(onDiskSizeWithoutHeader);
      } else {
        // We don't know the on-disk size: read the header first (unless it
        // was prefetched) to find out how much more to read.
        if (headerBuf == null) {
          headerBuf = ByteBuffer.allocate(hdrSize);
          readAtOffset(is, headerBuf.array(), headerBuf.arrayOffset(),
              hdrSize, false, offset, pread);
        }

        b = new HFileBlock(headerBuf, getMinorVersion());
        onDiskBlock = new byte[b.getOnDiskSizeWithHeader() + hdrSize];
        System.arraycopy(headerBuf.array(),
            headerBuf.arrayOffset(), onDiskBlock, 0, hdrSize);
        nextBlockOnDiskSize =
            readAtOffset(is, onDiskBlock, hdrSize, b.getOnDiskSizeWithHeader()
                - hdrSize, true, offset + hdrSize, pread);
        onDiskSizeWithHeader = b.onDiskSizeWithoutHeader + hdrSize;
      }

      boolean isCompressed =
          compressAlgo != null
              && compressAlgo != Compression.Algorithm.NONE;
      if (!isCompressed) {
        b.assumeUncompressed();
      }

      if (verifyChecksum &&
          !validateBlockChecksum(b, onDiskBlock, hdrSize)) {
        return null;
      }

      if (isCompressed) {
        // Decompress into a freshly allocated buffer.
        b.allocateBuffer(nextBlockOnDiskSize > 0);
        if (b.blockType.equals(BlockType.ENCODED_DATA)) {
          encodedBlockDecodingCtx.prepareDecoding(b.getOnDiskSizeWithoutHeader(),
              b.getUncompressedSizeWithoutHeader(), b.getBufferWithoutHeader(),
              onDiskBlock, hdrSize);
        } else {
          defaultDecodingCtx.prepareDecoding(b.getOnDiskSizeWithoutHeader(),
              b.getUncompressedSizeWithoutHeader(), b.getBufferWithoutHeader(),
              onDiskBlock, hdrSize);
        }
        if (nextBlockOnDiskSize > 0) {
          // Copy the next block's header into the extra space at the end.
          System.arraycopy(onDiskBlock, onDiskSizeWithHeader, b.buf.array(),
              b.buf.arrayOffset() + hdrSize
                  + b.uncompressedSizeWithoutHeader + b.totalChecksumBytes(),
              hdrSize);
        }
      } else {
        // The on-disk block is already uncompressed: just wrap it.
        b = new HFileBlock(ByteBuffer.wrap(onDiskBlock, 0,
            onDiskSizeWithHeader), getMinorVersion());
      }

      b.nextBlockOnDiskSizeWithHeader = nextBlockOnDiskSize;

      // Remember the next block's header so the follow-up read can skip it.
      if (b.nextBlockOnDiskSizeWithHeader > 0) {
        prefetchedHeader.offset = offset + b.getOnDiskSizeWithHeader();
        System.arraycopy(onDiskBlock, onDiskSizeWithHeader,
            prefetchedHeader.header, 0, hdrSize);
      }

      b.includesMemstoreTS = includesMemstoreTS;
      b.offset = offset;
      return b;
    }

    void setIncludesMemstoreTS(boolean enabled) {
      includesMemstoreTS = enabled;
    }

    void setDataBlockEncoder(HFileDataBlockEncoder encoder) {
      this.dataBlockEncoder = encoder;
      encodedBlockDecodingCtx = encoder.newOnDiskDataBlockDecodingContext(
          this.compressAlgo);
    }
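
    /**
     * Validates the block's stored checksum against its header and data;
     * returns false on a mismatch so the caller can retry the read with HDFS
     * checksums.
     */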
    protected boolean validateBlockChecksum(HFileBlock block,
        byte[] data, int hdrSize) throws IOException {
      return ChecksumUtil.validateBlockChecksum(path, block,
          data, hdrSize);
    }
  }

  @Override
  public int getSerializedLength() {
    if (buf != null) {
      return this.buf.limit() + HFileBlock.EXTRA_SERIALIZATION_SPACE;
    }
    return 0;
  }

  @Override
  public void serialize(ByteBuffer destination) {
    ByteBuffer dupBuf = this.buf.duplicate();
    dupBuf.rewind();
    destination.put(dupBuf);
    destination.putInt(this.minorVersion);
    destination.putLong(this.offset);
    destination.putInt(this.nextBlockOnDiskSizeWithHeader);
    destination.rewind();
  }

  public void serializeExtraInfo(ByteBuffer destination) {
    destination.putInt(this.minorVersion);
    destination.putLong(this.offset);
    destination.putInt(this.nextBlockOnDiskSizeWithHeader);
    destination.rewind();
  }

  @Override
  public CacheableDeserializer<Cacheable> getDeserializer() {
    return HFileBlock.blockDeserializer;
  }

  @Override
  public boolean equals(Object comparison) {
    if (this == comparison) {
      return true;
    }
    if (comparison == null) {
      return false;
    }
    if (comparison.getClass() != this.getClass()) {
      return false;
    }

    HFileBlock castedComparison = (HFileBlock) comparison;

    if (castedComparison.blockType != this.blockType) {
      return false;
    }
    if (castedComparison.nextBlockOnDiskSizeWithHeader !=
        this.nextBlockOnDiskSizeWithHeader) {
      return false;
    }
    if (castedComparison.offset != this.offset) {
      return false;
    }
    if (castedComparison.onDiskSizeWithoutHeader !=
        this.onDiskSizeWithoutHeader) {
      return false;
    }
    if (castedComparison.prevBlockOffset != this.prevBlockOffset) {
      return false;
    }
    if (castedComparison.uncompressedSizeWithoutHeader !=
        this.uncompressedSizeWithoutHeader) {
      return false;
    }
    if (this.buf.compareTo(castedComparison.buf) != 0) {
      return false;
    }
    if (this.buf.position() != castedComparison.buf.position()) {
      return false;
    }
    if (this.buf.limit() != castedComparison.buf.limit()) {
      return false;
    }
    return true;
  }

  public boolean doesIncludeMemstoreTS() {
    return includesMemstoreTS;
  }

  public DataBlockEncoding getDataBlockEncoding() {
    if (blockType == BlockType.ENCODED_DATA) {
      return DataBlockEncoding.getEncodingById(getDataBlockEncodingId());
    }
    return DataBlockEncoding.NONE;
  }

  byte getChecksumType() {
    return this.checksumType;
  }

  int getBytesPerChecksum() {
    return this.bytesPerChecksum;
  }

  int getOnDiskDataSizeWithHeader() {
    return this.onDiskDataSizeWithHeader;
  }

  int getMinorVersion() {
    return this.minorVersion;
  }

  int totalChecksumBytes() {
    // Blocks from a pre-checksum minor version, and blocks with
    // bytesPerChecksum == 0 (e.g. blocks created for the cache), carry no
    // checksum bytes.
    if (minorVersion < MINOR_VERSION_WITH_CHECKSUM
        || this.bytesPerChecksum == 0) {
      return 0;
    }
    return (int) ChecksumUtil.numBytes(onDiskDataSizeWithHeader,
        bytesPerChecksum);
  }

  public int headerSize() {
    return headerSize(this.minorVersion);
  }

  private static int headerSize(int minorVersion) {
    if (minorVersion < MINOR_VERSION_WITH_CHECKSUM) {
      return HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM;
    }
    return HConstants.HFILEBLOCK_HEADER_SIZE;
  }

  static String toStringHeader(ByteBuffer buf) throws IOException {
    int offset = buf.arrayOffset();
    byte[] b = buf.array();
    long magic = Bytes.toLong(b, offset);
    BlockType bt = BlockType.read(buf);
    offset += Bytes.SIZEOF_LONG;
    int compressedBlockSizeNoHeader = Bytes.toInt(b, offset);
    offset += Bytes.SIZEOF_INT;
    int uncompressedBlockSizeNoHeader = Bytes.toInt(b, offset);
    offset += Bytes.SIZEOF_INT;
    long prevBlockOffset = Bytes.toLong(b, offset);
    offset += Bytes.SIZEOF_LONG;
    byte cksumtype = b[offset];
    offset += Bytes.SIZEOF_BYTE;
    long bytesPerChecksum = Bytes.toInt(b, offset);
    offset += Bytes.SIZEOF_INT;
    long onDiskDataSizeWithHeader = Bytes.toInt(b, offset);
    offset += Bytes.SIZEOF_INT;
    return " Header dump: magic: " + magic +
        " blockType " + bt +
        " compressedBlockSizeNoHeader " +
        compressedBlockSizeNoHeader +
        " uncompressedBlockSizeNoHeader " +
        uncompressedBlockSizeNoHeader +
        " prevBlockOffset " + prevBlockOffset +
        " checksumType " + ChecksumType.codeToType(cksumtype) +
        " bytesPerChecksum " + bytesPerChecksum +
        " onDiskDataSizeWithHeader " + onDiskDataSizeWithHeader;
  }
}