/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.avro;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.List;

import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.HServerInfo;
import org.apache.hadoop.hbase.HServerLoad;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.avro.generated.AClusterStatus;
import org.apache.hadoop.hbase.avro.generated.AColumn;
import org.apache.hadoop.hbase.avro.generated.AColumnValue;
import org.apache.hadoop.hbase.avro.generated.ACompressionAlgorithm;
import org.apache.hadoop.hbase.avro.generated.ADelete;
import org.apache.hadoop.hbase.avro.generated.AFamilyDescriptor;
import org.apache.hadoop.hbase.avro.generated.AGet;
import org.apache.hadoop.hbase.avro.generated.AIllegalArgument;
import org.apache.hadoop.hbase.avro.generated.APut;
import org.apache.hadoop.hbase.avro.generated.ARegionLoad;
import org.apache.hadoop.hbase.avro.generated.AResult;
import org.apache.hadoop.hbase.avro.generated.AResultEntry;
import org.apache.hadoop.hbase.avro.generated.AScan;
import org.apache.hadoop.hbase.avro.generated.AServerAddress;
import org.apache.hadoop.hbase.avro.generated.AServerInfo;
import org.apache.hadoop.hbase.avro.generated.AServerLoad;
import org.apache.hadoop.hbase.avro.generated.ATableDescriptor;

import org.apache.avro.Schema;
import org.apache.avro.generic.GenericArray;
import org.apache.avro.generic.GenericData;
import org.apache.avro.util.Utf8;

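/**
 * Static helpers for converting between HBase client objects and the
 * Avro-generated record classes in org.apache.hadoop.hbase.avro.generated.
 */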
public class AvroUtil {

  //
  // Cluster metadata
  //

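  /**
   * Converts an HServerAddress into its Avro counterpart, AServerAddress.
   */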
  static public AServerAddress hsaToASA(HServerAddress hsa) throws IOException {
    AServerAddress asa = new AServerAddress();
    asa.hostname = new Utf8(hsa.getHostname());
    asa.inetSocketAddress = new Utf8(hsa.getInetSocketAddress().toString());
    asa.port = hsa.getPort();
    return asa;
  }

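  /**
   * Converts a per-region HServerLoad.RegionLoad into an Avro ARegionLoad record.
   */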
  static public ARegionLoad hrlToARL(HServerLoad.RegionLoad rl) throws IOException {
    ARegionLoad arl = new ARegionLoad();
    arl.memStoreSizeMB = rl.getMemStoreSizeMB();
    arl.name = ByteBuffer.wrap(rl.getName());
    arl.storefileIndexSizeMB = rl.getStorefileIndexSizeMB();
    arl.storefiles = rl.getStorefiles();
    arl.storefileSizeMB = rl.getStorefileSizeMB();
    arl.stores = rl.getStores();
    return arl;
  }

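  /**
   * Converts an HServerLoad into an Avro AServerLoad, including the
   * per-region load entries (an empty array when none are reported).
   */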
  static public AServerLoad hslToASL(HServerLoad hsl) throws IOException {
    AServerLoad asl = new AServerLoad();
    asl.load = hsl.getLoad();
    asl.maxHeapMB = hsl.getMaxHeapMB();
    asl.memStoreSizeInMB = hsl.getMemStoreSizeInMB();
    asl.numberOfRegions = hsl.getNumberOfRegions();
    asl.numberOfRequests = hsl.getNumberOfRequests();

    Collection<HServerLoad.RegionLoad> regionLoads = hsl.getRegionsLoad();
    Schema s = Schema.createArray(ARegionLoad.SCHEMA$);
    GenericData.Array<ARegionLoad> aregionLoads = null;
    if (regionLoads != null) {
      aregionLoads = new GenericData.Array<ARegionLoad>(regionLoads.size(), s);
      for (HServerLoad.RegionLoad rl : regionLoads) {
        aregionLoads.add(hrlToARL(rl));
      }
    } else {
      aregionLoads = new GenericData.Array<ARegionLoad>(0, s);
    }
    asl.regionsLoad = aregionLoads;

    asl.storefileIndexSizeInMB = hsl.getStorefileIndexSizeInMB();
    asl.storefiles = hsl.getStorefiles();
    asl.storefileSizeInMB = hsl.getStorefileSizeInMB();
    asl.usedHeapMB = hsl.getUsedHeapMB();
    return asl;
  }

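  /**
   * Converts an HServerInfo (address, load, name, start code) into an Avro AServerInfo.
   */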
  static public AServerInfo hsiToASI(HServerInfo hsi) throws IOException {
    AServerInfo asi = new AServerInfo();
    asi.infoPort = hsi.getInfoPort();
    asi.load = hslToASL(hsi.getLoad());
    asi.serverAddress = hsaToASA(hsi.getServerAddress());
    asi.serverName = new Utf8(hsi.getServerName());
    asi.startCode = hsi.getStartCode();
    return asi;
  }

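  /**
   * Converts a ClusterStatus into an Avro AClusterStatus, copying the
   * dead-server names and per-server info into Avro arrays.
   */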
  static public AClusterStatus csToACS(ClusterStatus cs) throws IOException {
    AClusterStatus acs = new AClusterStatus();
    acs.averageLoad = cs.getAverageLoad();
    Collection<String> deadServerNames = cs.getDeadServerNames();
    Schema stringArraySchema = Schema.createArray(Schema.create(Schema.Type.STRING));
    GenericData.Array<Utf8> adeadServerNames = null;
    if (deadServerNames != null) {
      adeadServerNames = new GenericData.Array<Utf8>(deadServerNames.size(), stringArraySchema);
      for (String deadServerName : deadServerNames) {
        adeadServerNames.add(new Utf8(deadServerName));
      }
    } else {
      adeadServerNames = new GenericData.Array<Utf8>(0, stringArraySchema);
    }
    acs.deadServerNames = adeadServerNames;
    acs.deadServers = cs.getDeadServers();
    acs.hbaseVersion = new Utf8(cs.getHBaseVersion());
    acs.regionsCount = cs.getRegionsCount();
    acs.requestsCount = cs.getRequestsCount();
    Collection<HServerInfo> hserverInfos = cs.getServerInfo();
    Schema s = Schema.createArray(AServerInfo.SCHEMA$);
    GenericData.Array<AServerInfo> aserverInfos = null;
    if (hserverInfos != null) {
      aserverInfos = new GenericData.Array<AServerInfo>(hserverInfos.size(), s);
      for (HServerInfo hsi : hserverInfos) {
        aserverInfos.add(hsiToASI(hsi));
      }
    } else {
      aserverInfos = new GenericData.Array<AServerInfo>(0, s);
    }
    acs.serverInfos = aserverInfos;
    acs.servers = cs.getServers();
    return acs;
  }

  //
  // Table metadata
  //

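  /**
   * Converts an HTableDescriptor into an Avro ATableDescriptor,
   * including descriptors for each column family.
   */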
  static public ATableDescriptor htdToATD(HTableDescriptor table) throws IOException {
    ATableDescriptor atd = new ATableDescriptor();
    atd.name = ByteBuffer.wrap(table.getName());
    Collection<HColumnDescriptor> families = table.getFamilies();
    Schema afdSchema = Schema.createArray(AFamilyDescriptor.SCHEMA$);
    GenericData.Array<AFamilyDescriptor> afamilies = null;
    if (families.size() > 0) {
      afamilies = new GenericData.Array<AFamilyDescriptor>(families.size(), afdSchema);
      for (HColumnDescriptor hcd : families) {
        AFamilyDescriptor afamily = hcdToAFD(hcd);
        afamilies.add(afamily);
      }
    } else {
      afamilies = new GenericData.Array<AFamilyDescriptor>(0, afdSchema);
    }
    atd.families = afamilies;
    atd.maxFileSize = table.getMaxFileSize();
    atd.memStoreFlushSize = table.getMemStoreFlushSize();
    atd.rootRegion = table.isRootRegion();
    atd.metaRegion = table.isMetaRegion();
    atd.metaTable = table.isMetaTable();
    atd.readOnly = table.isReadOnly();
    atd.deferredLogFlush = table.isDeferredLogFlush();
    return atd;
  }

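  /**
   * Converts an Avro ATableDescriptor into an HTableDescriptor. Null fields
   * are left at their HBase defaults; attempting to set the root or meta
   * flags raises an AIllegalArgument.
   */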
  static public HTableDescriptor atdToHTD(ATableDescriptor atd) throws IOException, AIllegalArgument {
    HTableDescriptor htd = new HTableDescriptor(Bytes.toBytes(atd.name));
    if (atd.families != null && atd.families.size() > 0) {
      for (AFamilyDescriptor afd : atd.families) {
        htd.addFamily(afdToHCD(afd));
      }
    }
    if (atd.maxFileSize != null) {
      htd.setMaxFileSize(atd.maxFileSize);
    }
    if (atd.memStoreFlushSize != null) {
      htd.setMemStoreFlushSize(atd.memStoreFlushSize);
    }
    if (atd.readOnly != null) {
      htd.setReadOnly(atd.readOnly);
    }
    if (atd.deferredLogFlush != null) {
      htd.setDeferredLogFlush(atd.deferredLogFlush);
    }
    if (atd.rootRegion != null || atd.metaRegion != null || atd.metaTable != null) {
      AIllegalArgument aie = new AIllegalArgument();
      aie.message = new Utf8("Can't set root or meta flag on create table.");
      throw aie;
    }
    return htd;
  }

  //
  // Family metadata
  //

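  /**
   * Converts an HColumnDescriptor into an Avro AFamilyDescriptor,
   * mapping the compression type onto the corresponding enum value.
   */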
  static public AFamilyDescriptor hcdToAFD(HColumnDescriptor hcd) throws IOException {
    AFamilyDescriptor afamily = new AFamilyDescriptor();
    afamily.name = ByteBuffer.wrap(hcd.getName());
    String compressionAlgorithm = hcd.getCompressionType().getName();
    if ("LZO".equals(compressionAlgorithm)) {
      afamily.compression = ACompressionAlgorithm.LZO;
    } else if ("GZ".equals(compressionAlgorithm)) {
      afamily.compression = ACompressionAlgorithm.GZ;
    } else {
      afamily.compression = ACompressionAlgorithm.NONE;
    }
    afamily.maxVersions = hcd.getMaxVersions();
    afamily.blocksize = hcd.getBlocksize();
    afamily.inMemory = hcd.isInMemory();
    afamily.timeToLive = hcd.getTimeToLive();
    afamily.blockCacheEnabled = hcd.isBlockCacheEnabled();
    return afamily;
  }

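  /**
   * Converts an Avro AFamilyDescriptor into an HColumnDescriptor;
   * null fields are left at their HBase defaults.
   */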
  static public HColumnDescriptor afdToHCD(AFamilyDescriptor afd) throws IOException {
    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes(afd.name));

    ACompressionAlgorithm compressionAlgorithm = afd.compression;
    if (compressionAlgorithm == ACompressionAlgorithm.LZO) {
      hcd.setCompressionType(Compression.Algorithm.LZO);
    } else if (compressionAlgorithm == ACompressionAlgorithm.GZ) {
      hcd.setCompressionType(Compression.Algorithm.GZ);
    } else {
      hcd.setCompressionType(Compression.Algorithm.NONE);
    }

    if (afd.maxVersions != null) {
      hcd.setMaxVersions(afd.maxVersions);
    }

    if (afd.blocksize != null) {
      hcd.setBlocksize(afd.blocksize);
    }

    if (afd.inMemory != null) {
      hcd.setInMemory(afd.inMemory);
    }

    if (afd.timeToLive != null) {
      hcd.setTimeToLive(afd.timeToLive);
    }

    if (afd.blockCacheEnabled != null) {
      hcd.setBlockCacheEnabled(afd.blockCacheEnabled);
    }
    return hcd;
  }

  //
  // Single-Row DML (Get)
  //

  // TODO(hammer): More concise idiom than if not null assign?
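  /**
   * Converts an Avro AGet into an HBase Get, applying any requested
   * columns, timestamp, time range, and max versions.
   */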
  static public Get agetToGet(AGet aget) throws IOException {
    Get get = new Get(Bytes.toBytes(aget.row));
    if (aget.columns != null) {
      for (AColumn acolumn : aget.columns) {
        if (acolumn.qualifier != null) {
          get.addColumn(Bytes.toBytes(acolumn.family), Bytes.toBytes(acolumn.qualifier));
        } else {
          get.addFamily(Bytes.toBytes(acolumn.family));
        }
      }
    }
    if (aget.timestamp != null) {
      get.setTimeStamp(aget.timestamp);
    }
    if (aget.timerange != null) {
      get.setTimeRange(aget.timerange.minStamp, aget.timerange.maxStamp);
    }
    if (aget.maxVersions != null) {
      get.setMaxVersions(aget.maxVersions);
    }
    return get;
  }

  // TODO(hammer): Pick one: Timestamp or TimeStamp
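  /**
   * Converts an HBase Result into an Avro AResult whose entries carry the
   * family, qualifier, value, and timestamp of each KeyValue.
   */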
  static public AResult resultToAResult(Result result) {
    AResult aresult = new AResult();
    aresult.row = ByteBuffer.wrap(result.getRow());
    Schema s = Schema.createArray(AResultEntry.SCHEMA$);
    GenericData.Array<AResultEntry> entries = null;
    List<KeyValue> resultKeyValues = result.list();
    if (resultKeyValues != null && resultKeyValues.size() > 0) {
      entries = new GenericData.Array<AResultEntry>(resultKeyValues.size(), s);
      for (KeyValue resultKeyValue : resultKeyValues) {
        AResultEntry entry = new AResultEntry();
        entry.family = ByteBuffer.wrap(resultKeyValue.getFamily());
        entry.qualifier = ByteBuffer.wrap(resultKeyValue.getQualifier());
        entry.value = ByteBuffer.wrap(resultKeyValue.getValue());
        entry.timestamp = resultKeyValue.getTimestamp();
        entries.add(entry);
      }
    } else {
      entries = new GenericData.Array<AResultEntry>(0, s);
    }
    aresult.entries = entries;
    return aresult;
  }

  //
  // Single-Row DML (Put)
  //

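  /**
   * Converts an Avro APut into an HBase Put, honoring an explicit
   * per-column timestamp when one is supplied.
   */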
  static public Put aputToPut(APut aput) throws IOException {
    Put put = new Put(Bytes.toBytes(aput.row));
    for (AColumnValue acv : aput.columnValues) {
      if (acv.timestamp != null) {
        put.add(Bytes.toBytes(acv.family),
                Bytes.toBytes(acv.qualifier),
                acv.timestamp,
                Bytes.toBytes(acv.value));
      } else {
        put.add(Bytes.toBytes(acv.family),
                Bytes.toBytes(acv.qualifier),
                Bytes.toBytes(acv.value));
      }
    }
    return put;
  }

  //
  // Single-Row DML (Delete)
  //

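  /**
   * Converts an Avro ADelete into an HBase Delete, deleting a whole family
   * when no qualifier is given and specific columns otherwise.
   */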
  static public Delete adeleteToDelete(ADelete adelete) throws IOException {
    Delete delete = new Delete(Bytes.toBytes(adelete.row));
    if (adelete.columns != null) {
      for (AColumn acolumn : adelete.columns) {
        if (acolumn.qualifier != null) {
          delete.deleteColumns(Bytes.toBytes(acolumn.family), Bytes.toBytes(acolumn.qualifier));
        } else {
          delete.deleteFamily(Bytes.toBytes(acolumn.family));
        }
      }
    }
    return delete;
  }

  //
  // Multi-row DML (Scan)
  //

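  /**
   * Converts an Avro AScan into an HBase Scan, applying any start/stop rows,
   * columns, timestamp, time range, and max versions that are set.
   */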
  static public Scan ascanToScan(AScan ascan) throws IOException {
    Scan scan = new Scan();
    if (ascan.startRow != null) {
      scan.setStartRow(Bytes.toBytes(ascan.startRow));
    }
    if (ascan.stopRow != null) {
      scan.setStopRow(Bytes.toBytes(ascan.stopRow));
    }
    if (ascan.columns != null) {
      for (AColumn acolumn : ascan.columns) {
        if (acolumn.qualifier != null) {
          scan.addColumn(Bytes.toBytes(acolumn.family), Bytes.toBytes(acolumn.qualifier));
        } else {
          scan.addFamily(Bytes.toBytes(acolumn.family));
        }
      }
    }
    if (ascan.timestamp != null) {
      scan.setTimeStamp(ascan.timestamp);
    }
    if (ascan.timerange != null) {
      scan.setTimeRange(ascan.timerange.minStamp, ascan.timerange.maxStamp);
    }
    if (ascan.maxVersions != null) {
      scan.setMaxVersions(ascan.maxVersions);
    }
    return scan;
  }

  // TODO(hammer): Better to return null or empty array?
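  /**
   * Converts an array of HBase Results into an Avro array of AResults;
   * a null or empty input yields an empty array.
   */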
  static public GenericArray<AResult> resultsToAResults(Result[] results) {
    Schema s = Schema.createArray(AResult.SCHEMA$);
    GenericData.Array<AResult> aresults = null;
    if (results != null && results.length > 0) {
      aresults = new GenericData.Array<AResult>(results.length, s);
      for (Result result : results) {
        aresults.add(resultToAResult(result));
      }
    } else {
      aresults = new GenericData.Array<AResult>(0, s);
    }
    return aresults;
  }
}