
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.codec.prefixtree.encode.column;

import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta;
import org.apache.hadoop.hbase.codec.prefixtree.encode.tokenize.Tokenizer;
import org.apache.hadoop.hbase.codec.prefixtree.encode.tokenize.TokenizerNode;
import org.apache.hadoop.hbase.util.CollectionUtils;
import org.apache.hadoop.hbase.util.vint.UFIntTool;

import com.google.common.collect.Lists;

/**
 * Takes the tokenized family or qualifier data and flattens it into a stream of bytes. The family
 * section is written after the row section, and the qualifier section after the family section.
 * <p/>
 * The family and qualifier tries, or "column tries", are structured differently from the row trie.
 * A column trie cannot be reassembled without external data about the offsets of its leaf nodes,
 * and these external pointers are stored in the nubs and leaves of the row trie. For each cell in
 * a row, the row trie contains a list of offsets into the column sections (along with pointers to
 * timestamps and other per-cell fields). These offsets point to the last column node/token of the
 * column name. To assemble the column name, the trie is traversed in reverse (right to left), with
 * each token pointing to the start of its "parent" node, which is the node to its left.
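 * <p/>
 * As a small hypothetical illustration (the actual byte layout is produced by ColumnNodeWriter),
 * the qualifiers "abc" and "abd" could share the parent token "ab", with leaf tokens "c" and "d":
 * <pre>
 *   output: [ab][c][d]    a cell's offset points at its last token, [c] or [d];
 *                         that node stores the offset of its parent [ab], so the
 *                         name is rebuilt right-to-left as "ab" + "c" = "abc"
 * </pre>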
 * <p/>
 * This choice was made to reduce the size of the column trie by storing the minimum amount of
 * offset data. As a result, to find a specific qualifier within a row, you must do a binary search
 * of the column nodes, reassembling each one as you search. Future versions of the PrefixTree might
 * encode the columns in both a forward and a reverse trie, converting these binary searches into
 * more efficient trie searches, which would be especially beneficial for wide rows.
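 * <p/>
 * A rough usage sketch (hypothetical variable names; assumes a Tokenizer already loaded with this
 * block's qualifier bytes):
 * <pre>
 * ColumnSectionWriter writer =
 *     new ColumnSectionWriter(blockMeta, qualifierTokenizer, false);
 * writer.compile();
 * int sectionNumBytes = writer.getNumBytes();
 * writer.writeBytes(outputStream);
 * </pre>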
 */
@InterfaceAudience.Private
public class ColumnSectionWriter {

  public static final int EXPECTED_NUBS_PLUS_LEAVES = 100;

  /****************** fields ****************************/

  private PrefixTreeBlockMeta blockMeta;

  private boolean familyVsQualifier;
  private Tokenizer tokenizer;
  private int numBytes = 0;
  private ArrayList<TokenizerNode> nonLeaves;
  private ArrayList<TokenizerNode> leaves;
  private ArrayList<TokenizerNode> allNodes;
  private ArrayList<ColumnNodeWriter> columnNodeWriters;
  private List<Integer> outputArrayOffsets;


  /*********************** construct *********************/

  public ColumnSectionWriter() {
    this.nonLeaves = Lists.newArrayList();
    this.leaves = Lists.newArrayList();
    this.outputArrayOffsets = Lists.newArrayList();
  }

  public ColumnSectionWriter(PrefixTreeBlockMeta blockMeta, Tokenizer builder,
      boolean familyVsQualifier) {
    this(); // init collections
    reconstruct(blockMeta, builder, familyVsQualifier);
  }

  public void reconstruct(PrefixTreeBlockMeta blockMeta, Tokenizer builder,
      boolean familyVsQualifier) {
    this.blockMeta = blockMeta;
    this.tokenizer = builder;
    this.familyVsQualifier = familyVsQualifier;
  }

  public void reset() {
    numBytes = 0;
    nonLeaves.clear();
    leaves.clear();
    outputArrayOffsets.clear();
  }


  /****************** methods *******************************/

  public ColumnSectionWriter compile() {
    if (familyVsQualifier) {
      // do nothing. max family length fixed at Byte.MAX_VALUE
    } else {
      blockMeta.setMaxQualifierLength(tokenizer.getMaxElementLength());
    }

    tokenizer.setNodeFirstInsertionIndexes();

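    // gather this trie's nodes, non-leaves first and then leaves (the booleans select which kind
    // of node to include; flag meanings assumed from the Tokenizer.appendNodes call pattern)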
    tokenizer.appendNodes(nonLeaves, true, false);

    tokenizer.appendNodes(leaves, false, true);

    allNodes = Lists.newArrayListWithCapacity(nonLeaves.size() + leaves.size());
    allNodes.addAll(nonLeaves);
    allNodes.addAll(leaves);

    columnNodeWriters = Lists.newArrayListWithCapacity(CollectionUtils.nullSafeSize(allNodes));
    for (int i = 0; i < allNodes.size(); ++i) {
      TokenizerNode node = allNodes.get(i);
      columnNodeWriters.add(new ColumnNodeWriter(blockMeta, node, familyVsQualifier));
    }

    // leaf widths are known at this point, so add them up
    int totalBytesWithoutOffsets = 0;
    for (int i = allNodes.size() - 1; i >= 0; --i) {
      ColumnNodeWriter columnNodeWriter = columnNodeWriters.get(i);
      // leaves store all but their first token byte
      totalBytesWithoutOffsets += columnNodeWriter.getWidthUsingPlaceholderForOffsetWidth(0);
    }

    // figure out how wide our offset FInts are
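    // (e.g. if totalBytesWithoutOffsets were 200 with 50 nodes, 1-byte parent offsets would give
    // 200 + 1*50 = 250 bytes, which fits under a 1-byte FInt's max of 255; numbers hypothetical)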
    int parentOffsetWidth = 0;
    while (true) {
      ++parentOffsetWidth;
      int numBytesFinder = totalBytesWithoutOffsets + parentOffsetWidth * allNodes.size();
      if (numBytesFinder < UFIntTool.maxValueForNumBytes(parentOffsetWidth)) {
        // it fits
        numBytes = numBytesFinder;
        break;
      }
    }
    if (familyVsQualifier) {
      blockMeta.setFamilyOffsetWidth(parentOffsetWidth);
    } else {
      blockMeta.setQualifierOffsetWidth(parentOffsetWidth);
    }

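    // assign each node's position in the output array; reading a parent's offset below relies on
    // parents having been appended to allNodes before their children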
    int forwardIndex = 0;
    for (int i = 0; i < allNodes.size(); ++i) {
      TokenizerNode node = allNodes.get(i);
      ColumnNodeWriter columnNodeWriter = columnNodeWriters.get(i);
      int fullNodeWidth = columnNodeWriter
          .getWidthUsingPlaceholderForOffsetWidth(parentOffsetWidth);
      node.setOutputArrayOffset(forwardIndex);
      columnNodeWriter.setTokenBytes(node.getToken());
      if (node.isRoot()) {
        columnNodeWriter.setParentStartPosition(0);
      } else {
        columnNodeWriter.setParentStartPosition(node.getParent().getOutputArrayOffset());
      }
      forwardIndex += fullNodeWidth;
    }

    tokenizer.appendOutputArrayOffsets(outputArrayOffsets);

    return this;
  }

  public void writeBytes(OutputStream os) throws IOException {
    for (ColumnNodeWriter columnNodeWriter : columnNodeWriters) {
      columnNodeWriter.writeBytes(os);
    }
  }


  /************* get/set **************************/

  public ArrayList<ColumnNodeWriter> getColumnNodeWriters() {
    return columnNodeWriters;
  }

  public int getNumBytes() {
    return numBytes;
  }

  public int getOutputArrayOffset(int sortedIndex) {
    return outputArrayOffsets.get(sortedIndex);
  }

  public ArrayList<TokenizerNode> getNonLeaves() {
    return nonLeaves;
  }

  public ArrayList<TokenizerNode> getLeaves() {
    return leaves;
  }

}