RAT (Release Audit Tool) results

This document contains the results of running RAT (Release Audit Tool) over the source tree.

*****************************************************
Summary
-------
Notes: 5
Binaries: 12
Archives: 0
Standards: 804

Apache Licensed: 720
Generated Documents: 0

JavaDocs are generated, so a license header is optional
Generated files do not require license headers
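
For example, the protobuf-generated Java sources listed below typically open with the compiler's banner in place of a license header, which is what RAT flags. A representative preamble (a sketch, not copied from this tree; the source name echoes one of the .proto files listed further down):

  // Generated by the protocol buffer compiler.  DO NOT EDIT!
  // source: ColumnSchemaMessage.proto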

84 Unknown Licenses (720 Apache-licensed + 84 unknown = 804 standard files; the flagged files are listed below)

*******************************

Unapproved licenses:

  partitions_1311144423174
  .gitignore
  CHANGES.txt
  partitions_1311110117679
  src/site/site.xml
  src/site/site.vm
  src/site/resources/css/freebsd_docbook.css
  src/docbkx/build.xml
  src/docbkx/configuration.xml
  src/docbkx/getting_started.xml
  src/docbkx/preface.xml
  src/docbkx/shell.xml
  src/docbkx/developer.xml
  src/docbkx/performance.xml
  src/docbkx/upgrading.xml
  src/docbkx/troubleshooting.xml
  src/assembly/all.xml
  src/main/ruby/shell/commands/shutdown.rb
  src/main/resources/org/apache/hadoop/hbase/rest/XMLSchema.xsd
  src/main/resources/org/apache/hadoop/hbase/mapreduce/RowCounter_Counters.properties
  src/main/resources/org/apache/hadoop/hbase/mapred/RowCounter_Counters.properties
  src/main/resources/hbase-webapps/static/hbase.css
  src/main/resources/hbase-webapps/master/master.jsp
  src/main/resources/hbase-webapps/master/table.jsp
  src/main/resources/hbase-webapps/master/zk.jsp
  src/main/resources/hbase-webapps/master/index.html
  src/main/resources/hbase-webapps/regionserver/index.html
  src/main/resources/hbase-webapps/regionserver/regionserver.jsp
  src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/StorageClusterStatusMessage.java
  src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/ScannerMessage.java
  src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/CellSetMessage.java
  src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableSchemaMessage.java
  src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableInfoMessage.java
  src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/ColumnSchemaMessage.java
  src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/CellMessage.java
  src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableListMessage.java
  src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/VersionMessage.java
  src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableDisable.java
  src/main/java/org/apache/hadoop/hbase/avro/hbase.avpr
  src/main/java/org/apache/hadoop/hbase/avro/generated/AColumnFamilyDescriptor.java
  src/main/java/org/apache/hadoop/hbase/avro/generated/AScan.java
  src/main/java/org/apache/hadoop/hbase/avro/generated/AResult.java
  src/main/java/org/apache/hadoop/hbase/avro/generated/AColumnValue.java
  src/main/java/org/apache/hadoop/hbase/avro/generated/AClusterStatus.java
  src/main/java/org/apache/hadoop/hbase/avro/generated/AServerAddress.java
  src/main/java/org/apache/hadoop/hbase/avro/generated/ATableDescriptor.java
  src/main/java/org/apache/hadoop/hbase/avro/generated/TCell.java
  src/main/java/org/apache/hadoop/hbase/avro/generated/AMasterNotRunning.java
  src/main/java/org/apache/hadoop/hbase/avro/generated/ADelete.java
  src/main/java/org/apache/hadoop/hbase/avro/generated/AFamilyDescriptor.java
  src/main/java/org/apache/hadoop/hbase/avro/generated/AIllegalArgument.java
  src/main/java/org/apache/hadoop/hbase/avro/generated/AResultEntry.java
  src/main/java/org/apache/hadoop/hbase/avro/generated/AServerLoad.java
  src/main/java/org/apache/hadoop/hbase/avro/generated/AServerInfo.java
  src/main/java/org/apache/hadoop/hbase/avro/generated/AColumn.java
  src/main/java/org/apache/hadoop/hbase/avro/generated/IOError.java
  src/main/java/org/apache/hadoop/hbase/avro/generated/APut.java
  src/main/java/org/apache/hadoop/hbase/avro/generated/ACompressionAlgorithm.java
  src/main/java/org/apache/hadoop/hbase/avro/generated/ATableExists.java
  src/main/java/org/apache/hadoop/hbase/avro/generated/AIOError.java
  src/main/java/org/apache/hadoop/hbase/avro/generated/AAlreadyExists.java
  src/main/java/org/apache/hadoop/hbase/avro/generated/HBase.java
  src/main/java/org/apache/hadoop/hbase/avro/generated/AGet.java
  src/main/java/org/apache/hadoop/hbase/avro/generated/ATimeRange.java
  src/main/java/org/apache/hadoop/hbase/avro/generated/ARegionLoad.java
  src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java
  src/main/javadoc/org/apache/hadoop/hbase/thrift/doc-files/Hbase.html
  src/main/javadoc/org/apache/hadoop/hbase/thrift/doc-files/index.html
  src/main/javadoc/org/apache/hadoop/hbase/thrift/doc-files/style.css
  src/test/ruby/test_helper.rb
  src/test/resources/log4j.properties
  src/test/resources/mapred-queues.xml
  src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
  src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java
  src/examples/mapreduce/index-builder-setup.rb
  partitions_1311186390587
  partitions_1311317807683
  pom.xml
  bin/local-master-backup.sh
  bin/local-regionservers.sh
  bin/set_meta_block_caching.rb
  conf/log4j.properties
  conf/hadoop-metrics.properties
  conf/regionservers
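
Files flagged above can typically be cleared by adding the standard ASF source header. For the Java sources the header takes the following form, shown here in Java comment syntax (other file types carry the same text in their own comment syntax; generated sources are more often excluded from the audit instead):

  /**
   * Licensed to the Apache Software Foundation (ASF) under one
   * or more contributor license agreements.  See the NOTICE file
   * distributed with this work for additional information
   * regarding copyright ownership.  The ASF licenses this file
   * to you under the Apache License, Version 2.0 (the
   * "License"); you may not use this file except in compliance
   * with the License.  You may obtain a copy of the License at
   *
   *     http://www.apache.org/licenses/LICENSE-2.0
   *
   * Unless required by applicable law or agreed to in writing, software
   * distributed under the License is distributed on an "AS IS" BASIS,
   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   * See the License for the specific language governing permissions and
   * limitations under the License.
   */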

*******************************

Archives (+ indicates readable, $ unreadable):

  (none)
*****************************************************
  Files with Apache License headers will be marked AL
  Binary files (which do not require AL headers) will be marked B
  Compressed archives will be marked A
  Notices, licenses etc. will be marked N
  Files with unknown or unapproved licenses will be marked !?????
  B     .partitions_1311186390587.crc
  N     LICENSE.txt
 !????? partitions_1311144423174
 !????? .gitignore
 !????? CHANGES.txt
 !????? partitions_1311110117679
  AL    src/saveVersion.sh
 !????? src/site/site.xml
 !????? src/site/site.vm
 !????? src/site/resources/css/freebsd_docbook.css
  AL    src/site/resources/css/site.css
  B     src/site/resources/images/hbase_logo_med.gif
  B     src/site/resources/images/architecture.gif
  B     src/site/resources/images/hadoop-logo.jpg
  B     src/site/resources/images/favicon.ico
  B     src/site/resources/images/asf_logo_wide.png
  B     src/site/resources/images/hbase_small.gif
  B     src/site/resources/images/replication_overview.png
  AL    src/site/xdoc/replication.xml
  AL    src/site/xdoc/bulk-loads.xml
  AL    src/site/xdoc/cygwin.xml
  AL    src/site/xdoc/old_news.xml
  AL    src/site/xdoc/acid-semantics.xml
  AL    src/site/xdoc/pseudo-distributed.xml
  AL    src/site/xdoc/index.xml
  AL    src/site/xdoc/metrics.xml
 !????? src/docbkx/build.xml
 !????? src/docbkx/configuration.xml
  AL    src/docbkx/book.xml
 !????? src/docbkx/getting_started.xml
 !????? src/docbkx/preface.xml
 !????? src/docbkx/shell.xml
 !????? src/docbkx/developer.xml
 !????? src/docbkx/performance.xml
 !????? src/docbkx/upgrading.xml
 !????? src/docbkx/troubleshooting.xml
 !????? src/assembly/all.xml
  AL    src/main/ruby/hbase/admin.rb
  AL    src/main/ruby/hbase/hbase.rb
  AL    src/main/ruby/hbase/table.rb
  AL    src/main/ruby/hbase/replication_admin.rb
  AL    src/main/ruby/shell.rb
  AL    src/main/ruby/hbase.rb
  AL    src/main/ruby/shell/formatter.rb
  AL    src/main/ruby/shell/commands/create.rb
  AL    src/main/ruby/shell/commands/balance_switch.rb
  AL    src/main/ruby/shell/commands/major_compact.rb
  AL    src/main/ruby/shell/commands/move.rb
  AL    src/main/ruby/shell/commands/zk_dump.rb
  AL    src/main/ruby/shell/commands/delete.rb
  AL    src/main/ruby/shell/commands/alter.rb
  AL    src/main/ruby/shell/commands/put.rb
  AL    src/main/ruby/shell/commands/status.rb
  AL    src/main/ruby/shell/commands/count.rb
  AL    src/main/ruby/shell/commands/is_disabled.rb
  AL    src/main/ruby/shell/commands/assign.rb
  AL    src/main/ruby/shell/commands/enable_peer.rb
  AL    src/main/ruby/shell/commands/remove_peer.rb
  AL    src/main/ruby/shell/commands/get.rb
  AL    src/main/ruby/shell/commands/drop.rb
  AL    src/main/ruby/shell/commands/is_enabled.rb
  AL    src/main/ruby/shell/commands/exists.rb
  AL    src/main/ruby/shell/commands/unassign.rb
  AL    src/main/ruby/shell/commands/add_peer.rb
  AL    src/main/ruby/shell/commands/get_counter.rb
  AL    src/main/ruby/shell/commands/start_replication.rb
  AL    src/main/ruby/shell/commands/incr.rb
  AL    src/main/ruby/shell/commands/truncate.rb
  AL    src/main/ruby/shell/commands/stop_replication.rb
  AL    src/main/ruby/shell/commands/disable_peer.rb
  AL    src/main/ruby/shell/commands/deleteall.rb
  AL    src/main/ruby/shell/commands/balancer.rb
  AL    src/main/ruby/shell/commands/describe.rb
  AL    src/main/ruby/shell/commands/list.rb
  AL    src/main/ruby/shell/commands/disable.rb
 !????? src/main/ruby/shell/commands/shutdown.rb
  AL    src/main/ruby/shell/commands/version.rb
  AL    src/main/ruby/shell/commands/close_region.rb
  AL    src/main/ruby/shell/commands/split.rb
  AL    src/main/ruby/shell/commands/compact.rb
  AL    src/main/ruby/shell/commands/flush.rb
  AL    src/main/ruby/shell/commands/scan.rb
  AL    src/main/ruby/shell/commands/enable.rb
  AL    src/main/ruby/shell/commands.rb
  AL    src/main/ruby/irb/hirb.rb
 !????? src/main/resources/org/apache/hadoop/hbase/rest/XMLSchema.xsd
  AL    src/main/resources/org/apache/hadoop/hbase/rest/protobuf/ColumnSchemaMessage.proto
  AL    src/main/resources/org/apache/hadoop/hbase/rest/protobuf/ScannerMessage.proto
  AL    src/main/resources/org/apache/hadoop/hbase/rest/protobuf/CellMessage.proto
  AL    src/main/resources/org/apache/hadoop/hbase/rest/protobuf/CellSetMessage.proto
  AL    src/main/resources/org/apache/hadoop/hbase/rest/protobuf/TableSchemaMessage.proto
  AL    src/main/resources/org/apache/hadoop/hbase/rest/protobuf/TableInfoMessage.proto
  AL    src/main/resources/org/apache/hadoop/hbase/rest/protobuf/StorageClusterStatusMessage.proto
  AL    src/main/resources/org/apache/hadoop/hbase/rest/protobuf/VersionMessage.proto
  AL    src/main/resources/org/apache/hadoop/hbase/rest/protobuf/TableListMessage.proto
  AL    src/main/resources/org/apache/hadoop/hbase/thrift/Hbase.thrift
 !????? src/main/resources/org/apache/hadoop/hbase/mapreduce/RowCounter_Counters.properties
 !????? src/main/resources/org/apache/hadoop/hbase/mapred/RowCounter_Counters.properties
  AL    src/main/resources/hbase-default.xml
  B     src/main/resources/hbase-webapps/static/hbase_logo_med.gif
 !????? src/main/resources/hbase-webapps/static/hbase.css
 !????? src/main/resources/hbase-webapps/master/master.jsp
 !????? src/main/resources/hbase-webapps/master/table.jsp
 !????? src/main/resources/hbase-webapps/master/zk.jsp
 !????? src/main/resources/hbase-webapps/master/index.html
 !????? src/main/resources/hbase-webapps/regionserver/index.html
 !????? src/main/resources/hbase-webapps/regionserver/regionserver.jsp
  AL    src/main/java/org/apache/hadoop/hbase/TableExistsException.java
  AL    src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java
  AL    src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java
  AL    src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
  AL    src/main/java/org/apache/hadoop/hbase/UnknownRowLockException.java
  AL    src/main/java/org/apache/hadoop/hbase/TableNotFoundException.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/RootResource.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/ResultGenerator.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/client/Response.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/client/Client.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/TableResource.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/ResourceConfig.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/metrics/RESTMetrics.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/metrics/RESTStatistics.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/provider/JAXBContextResolver.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/provider/producer/PlainTextMessageBodyProducer.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/provider/producer/ProtobufMessageBodyProducer.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/transform/Base64.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/transform/Transform.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/transform/NullTransform.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseStream.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestStream.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseWrapper.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestWrapper.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java
 !????? src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/StorageClusterStatusMessage.java
 !????? src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/ScannerMessage.java
 !????? src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/CellSetMessage.java
 !????? src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableSchemaMessage.java
 !????? src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableInfoMessage.java
 !????? src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/ColumnSchemaMessage.java
 !????? src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/CellMessage.java
 !????? src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableListMessage.java
 !????? src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/VersionMessage.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/Constants.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterVersionModel.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/Main.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java
  AL    src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java
  AL    src/main/java/org/apache/hadoop/hbase/HServerLoad.java
  AL    src/main/java/org/apache/hadoop/hbase/client/HTableFactory.java
  AL    src/main/java/org/apache/hadoop/hbase/client/RowLock.java
  AL    src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
  AL    src/main/java/org/apache/hadoop/hbase/client/MultiPutResponse.java
  AL    src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java
  AL    src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java
  AL    src/main/java/org/apache/hadoop/hbase/client/ScannerTimeoutException.java
  AL    src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
  AL    src/main/java/org/apache/hadoop/hbase/client/Delete.java
  AL    src/main/java/org/apache/hadoop/hbase/client/MultiAction.java
  AL    src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
  AL    src/main/java/org/apache/hadoop/hbase/client/HConnection.java
  AL    src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java
  AL    src/main/java/org/apache/hadoop/hbase/client/Row.java
  AL    src/main/java/org/apache/hadoop/hbase/client/RegionOfflineException.java
  AL    src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java
  AL    src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java
  AL    src/main/java/org/apache/hadoop/hbase/client/HTableInterfaceFactory.java
  AL    src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java
  AL    src/main/java/org/apache/hadoop/hbase/client/HTable.java
  AL    src/main/java/org/apache/hadoop/hbase/client/Increment.java
  AL    src/main/java/org/apache/hadoop/hbase/client/HTablePool.java
  AL    src/main/java/org/apache/hadoop/hbase/client/Action.java
  AL    src/main/java/org/apache/hadoop/hbase/client/MultiPut.java
  AL    src/main/java/org/apache/hadoop/hbase/client/Put.java
  AL    src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java
  AL    src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
  AL    src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java
  AL    src/main/java/org/apache/hadoop/hbase/client/Result.java
  AL    src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
  AL    src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java
  AL    src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java
  AL    src/main/java/org/apache/hadoop/hbase/client/package-info.java
  AL    src/main/java/org/apache/hadoop/hbase/client/Scan.java
  AL    src/main/java/org/apache/hadoop/hbase/client/Get.java
  AL    src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
  AL    src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java
  AL    src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTable.java
  AL    src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperNodeTracker.java
  AL    src/main/java/org/apache/hadoop/hbase/zookeeper/MetaNodeTracker.java
 !????? src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableDisable.java
  AL    src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java
  AL    src/main/java/org/apache/hadoop/hbase/zookeeper/ZKServerTool.java
  AL    src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAssign.java
  AL    src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java
  AL    src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterStatusTracker.java
  AL    src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperListener.java
  AL    src/main/java/org/apache/hadoop/hbase/zookeeper/RootRegionTracker.java
  AL    src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
  AL    src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperMainServerArg.java
  AL    src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java
  AL    src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java
  AL    src/main/java/org/apache/hadoop/hbase/UnknownRegionException.java
  AL    src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
  AL    src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
  AL    src/main/java/org/apache/hadoop/hbase/catalog/RootLocationEditor.java
  AL    src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java
  AL    src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java
  AL    src/main/java/org/apache/hadoop/hbase/RemoteExceptionHandler.java
  AL    src/main/java/org/apache/hadoop/hbase/DoNotRetryIOException.java
  AL    src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
  AL    src/main/java/org/apache/hadoop/hbase/HServerInfo.java
  AL    src/main/java/org/apache/hadoop/hbase/HConstants.java
  AL    src/main/java/org/apache/hadoop/hbase/MasterNotRunningException.java
  AL    src/main/java/org/apache/hadoop/hbase/ClockOutOfSyncException.java
  AL    src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java
  AL    src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java
  AL    src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java
  AL    src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
  AL    src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
  AL    src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java
  AL    src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java
  AL    src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
  AL    src/main/java/org/apache/hadoop/hbase/master/handler/TotesHRegionInfo.java
  AL    src/main/java/org/apache/hadoop/hbase/master/handler/OpenedRegionHandler.java
  AL    src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java
  AL    src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
  AL    src/main/java/org/apache/hadoop/hbase/master/handler/TableModifyFamilyHandler.java
  AL    src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
  AL    src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
  AL    src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
  AL    src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java
  AL    src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
  AL    src/main/java/org/apache/hadoop/hbase/master/DeadServer.java
  AL    src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
  AL    src/main/java/org/apache/hadoop/hbase/master/metrics/MasterStatistics.java
  AL    src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java
  AL    src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
  AL    src/main/java/org/apache/hadoop/hbase/master/LogCleanerDelegate.java
  AL    src/main/java/org/apache/hadoop/hbase/master/TimeToLiveLogCleaner.java
  AL    src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
  AL    src/main/java/org/apache/hadoop/hbase/master/HMaster.java
  AL    src/main/java/org/apache/hadoop/hbase/master/LogCleaner.java
  AL    src/main/java/org/apache/hadoop/hbase/master/BulkAssigner.java
  AL    src/main/java/org/apache/hadoop/hbase/ZooKeeperConnectionException.java
  AL    src/main/java/org/apache/hadoop/hbase/util/MD5Hash.java
  AL    src/main/java/org/apache/hadoop/hbase/util/FileSystemVersionException.java
  AL    src/main/java/org/apache/hadoop/hbase/util/HMerge.java
  AL    src/main/java/org/apache/hadoop/hbase/util/HashedBytes.java
  AL    src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdgeManager.java
  AL    src/main/java/org/apache/hadoop/hbase/util/CancelableProgressable.java
  AL    src/main/java/org/apache/hadoop/hbase/util/SoftValueSortedMap.java
  AL    src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java
  AL    src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
  AL    src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java
  AL    src/main/java/org/apache/hadoop/hbase/util/HBaseConfTool.java
  AL    src/main/java/org/apache/hadoop/hbase/util/InfoServer.java
  AL    src/main/java/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.java
  AL    src/main/java/org/apache/hadoop/hbase/util/Hash.java
  AL    src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
  AL    src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
  AL    src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdge.java
  AL    src/main/java/org/apache/hadoop/hbase/util/Base64.java
  AL    src/main/java/org/apache/hadoop/hbase/util/ManualEnvironmentEdge.java
  AL    src/main/java/org/apache/hadoop/hbase/util/MurmurHash.java
  AL    src/main/java/org/apache/hadoop/hbase/util/Bytes.java
  AL    src/main/java/org/apache/hadoop/hbase/util/BloomFilter.java
  AL    src/main/java/org/apache/hadoop/hbase/util/Pair.java
  AL    src/main/java/org/apache/hadoop/hbase/util/Merge.java
  AL    src/main/java/org/apache/hadoop/hbase/util/ServerCommandLine.java
  AL    src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java
  AL    src/main/java/org/apache/hadoop/hbase/util/Writables.java
  AL    src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
  AL    src/main/java/org/apache/hadoop/hbase/util/Threads.java
  AL    src/main/java/org/apache/hadoop/hbase/util/Keying.java
  AL    src/main/java/org/apache/hadoop/hbase/util/PairOfSameType.java
  AL    src/main/java/org/apache/hadoop/hbase/util/Strings.java
  AL    src/main/java/org/apache/hadoop/hbase/util/DynamicByteBloomFilter.java
  AL    src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java
  AL    src/main/java/org/apache/hadoop/hbase/util/ClassSize.java
  AL    src/main/java/org/apache/hadoop/hbase/util/ByteBloomFilter.java
  AL    src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
  AL    src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java
  AL    src/main/java/org/apache/hadoop/hbase/util/JvmVersion.java
  AL    src/main/java/org/apache/hadoop/hbase/util/Sleeper.java
  AL    src/main/java/org/apache/hadoop/hbase/security/User.java
  AL    src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java
  AL    src/main/java/org/apache/hadoop/hbase/avro/package.html
  AL    src/main/java/org/apache/hadoop/hbase/avro/hbase.genavro
  AL    src/main/java/org/apache/hadoop/hbase/avro/AvroUtil.java
  AL    src/main/java/org/apache/hadoop/hbase/avro/AvroServer.java
 !????? src/main/java/org/apache/hadoop/hbase/avro/hbase.avpr
 !????? src/main/java/org/apache/hadoop/hbase/avro/generated/AColumnFamilyDescriptor.java
 !????? src/main/java/org/apache/hadoop/hbase/avro/generated/AScan.java
 !????? src/main/java/org/apache/hadoop/hbase/avro/generated/AResult.java
 !????? src/main/java/org/apache/hadoop/hbase/avro/generated/AColumnValue.java
 !????? src/main/java/org/apache/hadoop/hbase/avro/generated/AClusterStatus.java
 !????? src/main/java/org/apache/hadoop/hbase/avro/generated/AServerAddress.java
 !????? src/main/java/org/apache/hadoop/hbase/avro/generated/ATableDescriptor.java
 !????? src/main/java/org/apache/hadoop/hbase/avro/generated/TCell.java
 !????? src/main/java/org/apache/hadoop/hbase/avro/generated/AMasterNotRunning.java
 !????? src/main/java/org/apache/hadoop/hbase/avro/generated/ADelete.java
 !????? src/main/java/org/apache/hadoop/hbase/avro/generated/AFamilyDescriptor.java
 !????? src/main/java/org/apache/hadoop/hbase/avro/generated/AIllegalArgument.java
 !????? src/main/java/org/apache/hadoop/hbase/avro/generated/AResultEntry.java
 !????? src/main/java/org/apache/hadoop/hbase/avro/generated/AServerLoad.java
 !????? src/main/java/org/apache/hadoop/hbase/avro/generated/AServerInfo.java
 !????? src/main/java/org/apache/hadoop/hbase/avro/generated/AColumn.java
 !????? src/main/java/org/apache/hadoop/hbase/avro/generated/IOError.java
 !????? src/main/java/org/apache/hadoop/hbase/avro/generated/APut.java
 !????? src/main/java/org/apache/hadoop/hbase/avro/generated/ACompressionAlgorithm.java
 !????? src/main/java/org/apache/hadoop/hbase/avro/generated/ATableExists.java
 !????? src/main/java/org/apache/hadoop/hbase/avro/generated/AIOError.java
 !????? src/main/java/org/apache/hadoop/hbase/avro/generated/AAlreadyExists.java
 !????? src/main/java/org/apache/hadoop/hbase/avro/generated/HBase.java
 !????? src/main/java/org/apache/hadoop/hbase/avro/generated/AGet.java
 !????? src/main/java/org/apache/hadoop/hbase/avro/generated/ATimeRange.java
 !????? src/main/java/org/apache/hadoop/hbase/avro/generated/ARegionLoad.java
  AL    src/main/java/org/apache/hadoop/hbase/HMsg.java
  AL    src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java
  AL    src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
  AL    src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java
  AL    src/main/java/org/apache/hadoop/hbase/thrift/generated/TCell.java
  AL    src/main/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java
  AL    src/main/java/org/apache/hadoop/hbase/thrift/generated/IOError.java
  AL    src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java
  AL    src/main/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java
  AL    src/main/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java
  AL    src/main/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java
  AL    src/main/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java
  AL    src/main/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java
  AL    src/main/java/org/apache/hadoop/hbase/RegionException.java
  AL    src/main/java/org/apache/hadoop/hbase/Abortable.java
  AL    src/main/java/org/apache/hadoop/hbase/metrics/MetricsString.java
  AL    src/main/java/org/apache/hadoop/hbase/metrics/PersistentMetricsTimeVaryingRate.java
  AL    src/main/java/org/apache/hadoop/hbase/metrics/HBaseInfo.java
  AL    src/main/java/org/apache/hadoop/hbase/metrics/MetricsRate.java
  AL    src/main/java/org/apache/hadoop/hbase/metrics/file/TimeStampingFileContext.java
  AL    src/main/java/org/apache/hadoop/hbase/metrics/MetricsMBeanBase.java
  AL    src/main/java/org/apache/hadoop/hbase/filter/FirstKeyOnlyFilter.java
  AL    src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java
  AL    src/main/java/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.java
  AL    src/main/java/org/apache/hadoop/hbase/filter/BinaryComparator.java
  AL    src/main/java/org/apache/hadoop/hbase/filter/Filter.java
  AL    src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java
  AL    src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java
  AL    src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java
  AL    src/main/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java
  AL    src/main/java/org/apache/hadoop/hbase/filter/FilterList.java
  AL    src/main/java/org/apache/hadoop/hbase/filter/InvalidRowFilterException.java
  AL    src/main/java/org/apache/hadoop/hbase/filter/ValueFilter.java
  AL    src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java
  AL    src/main/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java
 !????? src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java
  AL    src/main/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java
  AL    src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java
  AL    src/main/java/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.java
  AL    src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
  AL    src/main/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java
  AL    src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java
  AL    src/main/java/org/apache/hadoop/hbase/filter/WritableByteArrayComparable.java
  AL    src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java
  AL    src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java
  AL    src/main/java/org/apache/hadoop/hbase/filter/IncompatibleFilterException.java
  AL    src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java
  AL    src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java
  AL    src/main/java/org/apache/hadoop/hbase/filter/package-info.java
  AL    src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java
  AL    src/main/java/org/apache/hadoop/hbase/filter/PrefixFilter.java
  AL    src/main/java/org/apache/hadoop/hbase/VersionAnnotation.java
  AL    src/main/java/org/apache/hadoop/hbase/ipc/HMasterRegionInterface.java
  AL    src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java
  AL    src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCProtocolVersion.java
  AL    src/main/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java
  AL    src/main/java/org/apache/hadoop/hbase/ipc/ByteBufferOutputStream.java
  AL    src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java
  AL    src/main/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java
  AL    src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcMetrics.java
  AL    src/main/java/org/apache/hadoop/hbase/ipc/ServerNotRunningException.java
  AL    src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCStatistics.java
  AL    src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCErrorHandler.java
  AL    src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java
  AL    src/main/java/org/apache/hadoop/hbase/TableNotDisabledException.java
  AL    src/main/java/org/apache/hadoop/hbase/UnknownScannerException.java
  AL    src/main/java/org/apache/hadoop/hbase/Server.java
  AL    src/main/java/org/apache/hadoop/hbase/KeyValue.java
  AL    src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRootHandler.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRootHandler.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseMetaHandler.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/PriorityCompactionQueue.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/InternalScan.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/WrongRegionException.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/NoSuchColumnFamilyException.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/DebugPrint.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerRunningException.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/LeaseListener.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/ShutdownHook.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/wal/OrphanHLogAfterSplitException.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALObserver.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogPrettyPrinter.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/ColumnCount.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerStatistics.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/LeaseException.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/OnlineRegions.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/ReadWriteConsistencyControl.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/ChangedReadersObserver.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerStoppedException.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/FlushRequester.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/RegionAlreadyInTransitionException.java
  AL    src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java
  AL    src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java
  AL    src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
  AL    src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java
  AL    src/main/java/org/apache/hadoop/hbase/mapreduce/TableReducer.java
  AL    src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java
  AL    src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
  AL    src/main/java/org/apache/hadoop/hbase/mapreduce/Export.java
  AL    src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java
  AL    src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java
  AL    src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java
  AL    src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
  AL    src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
  AL    src/main/java/org/apache/hadoop/hbase/mapreduce/SimpleTotalOrderPartitioner.java
  AL    src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
  AL    src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
  AL    src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapper.java
  AL    src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
  AL    src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java
  AL    src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputCommitter.java
  AL    src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java
  AL    src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java
  AL    src/main/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java
  AL    src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
  AL    src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
  AL    src/main/java/org/apache/hadoop/hbase/mapreduce/hadoopbackport/TotalOrderPartitioner.java
  AL    src/main/java/org/apache/hadoop/hbase/mapreduce/hadoopbackport/InputSampler.java
  AL    src/main/java/org/apache/hadoop/hbase/mapreduce/package-info.java
  AL    src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
  AL    src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java
  AL    src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java
  AL    src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
  AL    src/main/java/org/apache/hadoop/hbase/Chore.java
  AL    src/main/java/org/apache/hadoop/hbase/Stoppable.java
  AL    src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
  AL    src/main/java/org/apache/hadoop/hbase/io/WritableWithSize.java
  AL    src/main/java/org/apache/hadoop/hbase/io/HbaseMapWritable.java
  AL    src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java
  AL    src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java
  AL    src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
  AL    src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlockQueue.java
  AL    src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
  AL    src/main/java/org/apache/hadoop/hbase/io/hfile/Compression.java
  AL    src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
  AL    src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java
  AL    src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java
  AL    src/main/java/org/apache/hadoop/hbase/io/hfile/BoundedRangeFileInputStream.java
  AL    src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java
  AL    src/main/java/org/apache/hadoop/hbase/io/HeapSize.java
  AL    src/main/java/org/apache/hadoop/hbase/io/TimeRange.java
  AL    src/main/java/org/apache/hadoop/hbase/io/Reference.java
  AL    src/main/java/org/apache/hadoop/hbase/io/CodeToClassAndBack.java
  AL    src/main/java/org/apache/hadoop/hbase/MasterAddressTracker.java
  AL    src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java
  AL    src/main/java/org/apache/hadoop/hbase/mapred/Driver.java
  AL    src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java
  AL    src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java
  AL    src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java
  AL    src/main/java/org/apache/hadoop/hbase/mapred/TableReduce.java
  AL    src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
  AL    src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java
  AL    src/main/java/org/apache/hadoop/hbase/mapred/TableMap.java
  AL    src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
  AL    src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
  AL    src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java
  AL    src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java
  AL    src/main/java/org/apache/hadoop/hbase/mapred/package-info.java
  AL    src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java
  AL    src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java
  AL    src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
  AL    src/main/java/org/apache/hadoop/hbase/HServerAddress.java
  AL    src/main/java/org/apache/hadoop/hbase/PleaseHoldException.java
  AL    src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java
  AL    src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
  AL    src/main/java/org/apache/hadoop/hbase/replication/ReplicationZookeeper.java
  AL    src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
  AL    src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkMetrics.java
  AL    src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
  AL    src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
  AL    src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
  AL    src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
  AL    src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationStatistics.java
  AL    src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java
  AL    src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceMetrics.java
  AL    src/main/java/org/apache/hadoop/hbase/DroppedSnapshotException.java
  AL    src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java
  AL    src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java
  AL    src/main/java/org/apache/hadoop/hbase/executor/RegionTransitionData.java
  AL    src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
  AL    src/main/xslt/configuration_to_docbook_section.xsl
  AL    src/main/javadoc/org/apache/hadoop/hbase/thrift/package.html
 !????? src/main/javadoc/org/apache/hadoop/hbase/thrift/doc-files/Hbase.html
 !????? src/main/javadoc/org/apache/hadoop/hbase/thrift/doc-files/index.html
 !????? src/main/javadoc/org/apache/hadoop/hbase/thrift/doc-files/style.css
  AL    src/main/javadoc/org/apache/hadoop/hbase/ipc/package.html
  AL    src/main/javadoc/org/apache/hadoop/hbase/io/hfile/package.html
  AL    src/main/javadoc/org/apache/hadoop/hbase/replication/package.html
  AL    src/main/javadoc/overview.html
  AL    src/test/ruby/hbase/hbase_test.rb
  AL    src/test/ruby/hbase/admin_test.rb
  AL    src/test/ruby/hbase/table_test.rb
  AL    src/test/ruby/shell/commands_test.rb
  AL    src/test/ruby/shell/formatter_test.rb
  AL    src/test/ruby/shell/shell_test.rb
  AL    src/test/ruby/tests_runner.rb
 !????? src/test/ruby/test_helper.rb
  AL    src/test/resources/hbase-site.xml
 !????? src/test/resources/log4j.properties
  AL    src/test/resources/org/apache/hadoop/hbase/PerformanceEvaluation_Counter.properties
 !????? src/test/resources/mapred-queues.xml
  AL    src/test/java/org/apache/hadoop/hbase/TestHMsg.java
  AL    src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdmin.java
  AL    src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
  AL    src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTTestingUtility.java
  AL    src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java
  AL    src/test/java/org/apache/hadoop/hbase/rest/TestRowResource.java
  AL    src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java
  AL    src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java
  AL    src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java
  AL    src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java
  AL    src/test/java/org/apache/hadoop/hbase/rest/TestVersionResource.java
  AL    src/test/java/org/apache/hadoop/hbase/rest/TestTransform.java
  AL    src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java
  AL    src/test/java/org/apache/hadoop/hbase/rest/model/TestTableSchemaModel.java
  AL    src/test/java/org/apache/hadoop/hbase/rest/model/TestColumnSchemaModel.java
  AL    src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java
  AL    src/test/java/org/apache/hadoop/hbase/rest/model/TestScannerModel.java
  AL    src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java
  AL    src/test/java/org/apache/hadoop/hbase/rest/model/TestTableListModel.java
  AL    src/test/java/org/apache/hadoop/hbase/rest/model/TestRowModel.java
  AL    src/test/java/org/apache/hadoop/hbase/rest/model/TestTableInfoModel.java
  AL    src/test/java/org/apache/hadoop/hbase/rest/model/TestVersionModel.java
  AL    src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterVersionModel.java
  AL    src/test/java/org/apache/hadoop/hbase/rest/model/TestCellSetModel.java
  AL    src/test/java/org/apache/hadoop/hbase/rest/model/TestCellModel.java
  AL    src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java
  AL    src/test/java/org/apache/hadoop/hbase/TimestampTestBase.java
  AL    src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java
  AL    src/test/java/org/apache/hadoop/hbase/MapFilePerformanceEvaluation.java
  AL    src/test/java/org/apache/hadoop/hbase/client/TestShell.java
  AL    src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java
  AL    src/test/java/org/apache/hadoop/hbase/client/TestMetaScanner.java
  AL    src/test/java/org/apache/hadoop/hbase/client/TestTimestamp.java
  AL    src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java
  AL    src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
  AL    src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java
  AL    src/test/java/org/apache/hadoop/hbase/client/TestGetRowVersions.java
  AL    src/test/java/org/apache/hadoop/hbase/client/TestHTablePool.java
  AL    src/test/java/org/apache/hadoop/hbase/client/TestResult.java
  AL    src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
  AL    src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java
  AL    src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java
 !????? src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
  AL    src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTable.java
  AL    src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperMainServerArg.java
  AL    src/test/java/org/apache/hadoop/hbase/zookeeper/TestHQuorumPeer.java
  AL    src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperNodeTracker.java
  AL    src/test/java/org/apache/hadoop/hbase/BROKE_TODO_FIX_TestAcidGuarantees.java
  AL    src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java
  AL    src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTrackerOnCluster.java
  AL    src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java
  AL    src/test/java/org/apache/hadoop/hbase/TestSerialization.java
  AL    src/test/java/org/apache/hadoop/hbase/TestHBaseTestingUtility.java
  AL    src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
  AL    src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
  AL    src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
  AL    src/test/java/org/apache/hadoop/hbase/master/TestLoadBalancer.java
  AL    src/test/java/org/apache/hadoop/hbase/master/TestHMasterRPCException.java
  AL    src/test/java/org/apache/hadoop/hbase/master/TestZKBasedOpenCloseRegion.java
  AL    src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java
  AL    src/test/java/org/apache/hadoop/hbase/master/OOMEHMaster.java
  AL    src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
  AL    src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java
  AL    src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java
  AL    src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
  AL    src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java
  AL    src/test/java/org/apache/hadoop/hbase/master/TestLogsCleaner.java
  AL    src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
  AL    src/test/java/org/apache/hadoop/hbase/master/TestDeadServer.java
  AL    src/test/java/org/apache/hadoop/hbase/master/BROKE_FIX_TestKillingServersFromMaster.java
  AL    src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
  AL    src/test/java/org/apache/hadoop/hbase/util/TestCompressionTest.java
  AL    src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
  AL    src/test/java/org/apache/hadoop/hbase/util/EnvironmentEdgeManagerTestHelper.java
  AL    src/test/java/org/apache/hadoop/hbase/util/TestBase64.java
  AL    src/test/java/org/apache/hadoop/hbase/util/SoftValueSortedMapTest.java
  AL    src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java
  AL    src/test/java/org/apache/hadoop/hbase/util/TestKeying.java
  AL    src/test/java/org/apache/hadoop/hbase/util/TestByteBloomFilter.java
  AL    src/test/java/org/apache/hadoop/hbase/util/TestEnvironmentEdgeManager.java
  AL    src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
  AL    src/test/java/org/apache/hadoop/hbase/util/TestDefaultEnvironmentEdge.java
  AL    src/test/java/org/apache/hadoop/hbase/util/TestBytes.java
  AL    src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
  AL    src/test/java/org/apache/hadoop/hbase/util/TestIncrementingEnvironmentEdge.java
  AL    src/test/java/org/apache/hadoop/hbase/util/DisabledTestMetaUtils.java
  AL    src/test/java/org/apache/hadoop/hbase/util/TestRootPath.java
  AL    src/test/java/org/apache/hadoop/hbase/security/TestUser.java
  AL    src/test/java/org/apache/hadoop/hbase/HBaseClusterTestCase.java
  AL    src/test/java/org/apache/hadoop/hbase/avro/TestAvroServer.java
  AL    src/test/java/org/apache/hadoop/hbase/TestInfoServers.java
  AL    src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java
  AL    src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java
  AL    src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java
  AL    src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
  AL    src/test/java/org/apache/hadoop/hbase/metrics/TestMetricsMBeanBase.java
  AL    src/test/java/org/apache/hadoop/hbase/TestAcidGuarantees.java
  AL    src/test/java/org/apache/hadoop/hbase/filter/TestPrefixFilter.java
  AL    src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java
  AL    src/test/java/org/apache/hadoop/hbase/filter/TestInclusiveStopFilter.java
  AL    src/test/java/org/apache/hadoop/hbase/filter/TestColumnPaginationFilter.java
  AL    src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueExcludeFilter.java
  AL    src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueFilter.java
  AL    src/test/java/org/apache/hadoop/hbase/filter/TestPageFilter.java
 !????? src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java
  AL    src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java
  AL    src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java
  AL    src/test/java/org/apache/hadoop/hbase/KeyValueTestUtil.java
  AL    src/test/java/org/apache/hadoop/hbase/TestKeyValue.java
  AL    src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestOpenRegionHandler.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/TestScanDeleteTracker.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/TestMasterAddressManager.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultySequenceFileLogReader.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALObserver.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/wal/InstrumentedSequenceFileLogWriter.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogMethods.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/TestRpcMetrics.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueScanFixture.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWildcardColumnTracker.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/TestPriorityCompactionQueue.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreLAB.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/TestReadWriteConsistencyControl.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueSkipListSet.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/TestExplicitColumnTracker.java
  AL    src/test/java/org/apache/hadoop/hbase/regionserver/DisabledTestRegionServerExit.java
  AL    src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
  AL    src/test/java/org/apache/hadoop/hbase/mapreduce/NMapInputFormat.java
  AL    src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan.java
  AL    src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java
  AL    src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSplit.java
  AL    src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java
  AL    src/test/java/org/apache/hadoop/hbase/mapreduce/TestSimpleTotalOrderPartitioner.java
  AL    src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java
  AL    src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java
  AL    src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java
  AL    src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java
  AL    src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java
  AL    src/test/java/org/apache/hadoop/hbase/io/hfile/RandomSeek.java
  AL    src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java
  AL    src/test/java/org/apache/hadoop/hbase/io/hfile/RandomDistribution.java
  AL    src/test/java/org/apache/hadoop/hbase/io/hfile/NanoTimer.java
  AL    src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
  AL    src/test/java/org/apache/hadoop/hbase/io/hfile/KVGenerator.java
  AL    src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
  AL    src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java
  AL    src/test/java/org/apache/hadoop/hbase/io/hfile/KeySampler.java
  AL    src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java
  AL    src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
  AL    src/test/java/org/apache/hadoop/hbase/io/TestImmutableBytesWritable.java
  AL    src/test/java/org/apache/hadoop/hbase/io/TestHbaseObjectWritable.java
  AL    src/test/java/org/apache/hadoop/hbase/TestFullLogReconstruction.java
  AL    src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java
  AL    src/test/java/org/apache/hadoop/hbase/TestScanMultipleVersions.java
  AL    src/test/java/org/apache/hadoop/hbase/EmptyWatcher.java
  AL    src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java
  AL    src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java
  AL    src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java
  AL    src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
  AL    src/test/java/org/apache/hadoop/hbase/replication/TestReplication.java
  AL    src/test/java/org/apache/hadoop/hbase/MultiRegionTable.java
  AL    src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
  AL    src/test/java/org/apache/hadoop/hbase/TestCompare.java
  AL    src/test/java/org/apache/hadoop/hbase/executor/TestExecutorService.java
  AL    src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
  AL    src/examples/thrift/Makefile
  AL    src/examples/thrift/DemoClient.php
  N     src/examples/thrift/README.txt
  AL    src/examples/thrift/DemoClient.rb
  AL    src/examples/thrift/DemoClient.cpp
  AL    src/examples/thrift/DemoClient.java
  AL    src/examples/thrift/DemoClient.py
  N     src/examples/README.txt
  AL    src/examples/mapreduce/org/apache/hadoop/hbase/mapreduce/IndexBuilder.java
  AL    src/examples/mapreduce/org/apache/hadoop/hbase/mapreduce/SampleUploader.java
 !????? src/examples/mapreduce/index-builder-setup.rb
  N     README.txt
  B     .partitions_1311110117679.crc
  N     NOTICE.txt
 !????? partitions_1311186390587
 !????? partitions_1311317807683
  B     .partitions_1311144423174.crc
 !????? pom.xml
  AL    bin/loadtable.rb
  AL    bin/hbase
  AL    bin/rolling-restart.sh
  AL    bin/check_meta.rb
  AL    bin/master-backup.sh
 !????? bin/local-master-backup.sh
  AL    bin/rename_table.rb
  AL    bin/zookeepers.sh
  AL    bin/stop-hbase.sh
  AL    bin/copy_table.rb
  AL    bin/regionservers.sh
  AL    bin/region_mover.rb
  AL    bin/set_meta_memstore_size.rb
  AL    bin/hbase-daemon.sh
 !????? bin/local-regionservers.sh
  AL    bin/hbase-daemons.sh
  AL    bin/graceful_stop.sh
  AL    bin/add_table.rb
  AL    bin/hirb.rb
 !????? bin/set_meta_block_caching.rb
  AL    bin/hbase-config.sh
  AL    bin/replication/copy_tables_desc.rb
  AL    bin/start-hbase.sh
  AL    conf/hbase-site.xml
 !????? conf/log4j.properties
 !????? conf/hadoop-metrics.properties
  AL    conf/hbase-env.sh
 !????? conf/regionservers
  B     .partitions_1311317807683.crc
 
 *****************************************************
 Printing headers for files without AL header...
 
 
 =======================================================================
 ==partitions_1311144423174
 =======================================================================
[binary SequenceFile header: key class org.apache.hadoop.hbase.io.ImmutableBytesWritable, value class org.apache.hadoop.io.NullWritable, compression org.apache.hadoop.io.compress.DefaultCodec; remainder of the file is unprintable binary data]

 =======================================================================
 ==.gitignore
 =======================================================================
/.classpath
/.externalToolBuilders
/.project
/.settings
/build
/.idea/
/logs
/target
*.iml
*.orig
*~

 =======================================================================
 ==CHANGES.txt
 =======================================================================
HBase Change Log
Release 0.90.4 - Unreleased
  BUG FIXES
   HBASE-3617  NoRouteToHostException during balancing will cause Master abort
               (Ted Yu)
   HBASE-3878  Hbase client throws NoSuchElementException (Ted Yu)
   HBASE-3881  Add disable balancer in graceful_stop.sh script
   HBASE-3895  Fix order of parameters after HBASE-1511
   HBASE-3874  ServerShutdownHandler fails on NPE if a plan has a random
               region assignment
   HBASE-3902  Add Bytes.toBigDecimal and Bytes.toBytes(BigDecimal)
               (Vaibhav Puranik)
   HBASE-3820  Splitlog() executed while the namenode was in safemode may
               cause data-loss (Jieshan Bean)
   HBASE-3905  HBaseAdmin.createTableAsync() should check for invalid split
               keys. (Ted Yu)
   HBASE-3912  [Stargate] Columns not handle by Scan
   HBASE-3908  TableSplit not implementing "hashCode" problem (Daniel Iancu)
   HBASE-3195  Binary row keys in hbck and other miscellaneous binary key
               display issues
   HBASE-3914  ROOT region appeared in two regionserver's onlineRegions at
               the same time (Jieshan Bean)
   HBASE-3934  MemStoreFlusher.getMemStoreLimit() doesn't honor defaultLimit
               (Ted Yu)
   HBASE-3946  The splitted region can be online again while the standby
               hmaster becomes the active one (Jieshan Bean)
   HBASE-3723  Major compact should be done when there is only one storefile
               and some keyvalue is outdated (Zhou Shuaifeng)
   HBASE-3892  Table can't disable (Gao Jinchao)
   HBASE-3894  Thread contention over row locks set monitor (Dave Latham)
   HBASE-3794  Ability to Discard Bad HTable Puts
   HBASE-3916  Fix the default bind address of ThriftServer to be wildcard
               instead of localhost. (Li Pi)
   HBASE-3985  Same Region could be picked out twice in LoadBalancer
               (Jieshan Bean)
   HBASE-3987  Fix a NullPointerException on a failure to load Bloom filter data
               (Mikhail Bautin)
   HBASE-3948  Improve split/compact result page for RegionServer status page
               (Li Pi)
   HBASE-3988  Infinite loop for secondary master (Liyin Tang)
   HBASE-3989  Error occured while RegionServer report to Master "we are up"
               should get master address again (Jieshan Bean)
   HBASE-3995  HBASE-3946 broke TestMasterFailover
   HBASE-2077  NullPointerException with an open scanner that expired causing
               an immediate region server shutdown -- part 2.
   HBASE-4005  close_region bugs
   HBASE-3969  Outdated data can not be cleaned in time (zhoushuaifeng)
   HBASE-4028  Hmaster crashes caused by splitting log. (Gao jinchao)
   HBASE-4035  Fix local-master-backup.sh - parameter order wrong (Lars George)
   HBASE-4020  "testWritesWhileGetting" unit test needs to be fixed.

 =======================================================================
 ==partitions_1311110117679
 =======================================================================
[binary SequenceFile header: key class org.apache.hadoop.hbase.io.ImmutableBytesWritable, value class org.apache.hadoop.io.NullWritable, compression org.apache.hadoop.io.compress.DefaultCodec; remainder of the file is unprintable binary data]

 =======================================================================
 ==src/site/site.xml
 =======================================================================
<?xml version="1.0" encoding="ISO-8859-1"?>

<project xmlns="http://maven.apache.org/DECORATION/1.0.0"
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://maven.apache.org/DECORATION/1.0.0 http://maven.apache.org/xsd/decoration-1.0.0.xsd">
  <bannerLeft>
    <name>HBase</name>
    <src>images/hbase_logo_med.gif</src>
    <href>http://hbase.apache.org/</href>
  </bannerLeft>
  <bannerRight>
      <src>images/asf_logo_wide.png</src>
    <href>http://www.apache.org/</href>
  </bannerRight>
  <version position="right" />
  <publishDate position="right" />
  <body>
    <menu name="HBase Project">
      <item name="Overview" href="index.html"/>
      <item name="License" href="license.html" />
      <item name="Downloads" href="http://www.apache.org/dyn/closer.cgi/hbase/" />
      <item name="Release Notes" href="https://issues.apache.org/jira/browse/HBASE?report=com.atlassian.jira.plugin.system.project:changelog-panel" />
      <item name="Issue Tracking" href="issue-tracking.html" />
      <item name="Mailing Lists" href="mail-lists.html" />
      <item name="Source Repository" href="source-repository.html" />
      <item name="Team" href="team-list.html" />
    </menu>
    <menu name="Documentation">
      <item name="Getting Started: Quick" href="quickstart.html" />
      <item name="Getting Started: Detailed" href="notsoquick.html" />
      <item name="API" href="apidocs/index.html" />
      <item name="X-Ref" href="xref/index.html" />
      <item name="Book"      href="book.html" />
      <item name="FAQ" href="faq.html" />
      <item name="Wiki" href="http://wiki.apache.org/hadoop/Hbase" />
      <item name="ACID Semantics" href="acid-semantics.html" />
      <item name="Bulk Loads" href="bulk-loads.html" />
      <item name="Metrics"      href="metrics.html" />
      <item name="HBase on Windows"      href="cygwin.html" />
      <item name="Cluster replication"      href="replication.html" />
      <item name="Pseudo-Dist. Extras"      href="pseudo-distributed.html" />
    </menu>
  </body>
    <skin>
        <groupId>org.apache.maven.skins</groupId>
      <artifactId>maven-stylus-skin</artifactId>
    </skin>
</project>

 =======================================================================
 ==src/site/site.vm
 =======================================================================
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<!-- Generated by Apache Maven Doxia at $dateFormat.format( $currentDate ) -->
#macro ( link $href $name $target $img $position $alt $border $width $height )
  #set ( $linkTitle = ' title="' + $name + '"' )
  #if( $target )
    #set ( $linkTarget = ' target="' + $target + '"' )
  #else
    #set ( $linkTarget = "" )
  #end
  #if ( ( $href.toLowerCase().startsWith("http") || $href.toLowerCase().startsWith("https") ) )
    #set ( $linkClass = ' class="externalLink"' )
  #else
    #set ( $linkClass = "" )
  #end
  #if ( $img )
    #if ( $position == "left" )
      <a href="$href"$linkClass$linkTarget$linkTitle>#image($img $alt $border $width $height)$name</a>
    #else
      <a href="$href"$linkClass$linkTarget$linkTitle>$name #image($img $alt $border $width $height)</a>
    #end
  #else
    <a href="$href"$linkClass$linkTarget$linkTitle>$name</a>
  #end
#end
##
#macro ( image $img $alt $border $width $height )
  #if( $img )
    #if ( ! ( $img.toLowerCase().startsWith("http") || $img.toLowerCase().startsWith("https") ) )
      #set ( $imgSrc = $PathTool.calculateLink( $img, . ) )
      #set ( $imgSrc = $imgSrc.replaceAll( "\\", "/" ) )
      #set ( $imgSrc = ' src="' + $imgSrc + '"' )
    #else
      #set ( $imgSrc = ' src="' + $img + '"' )
    #end
    #if( $alt )
      #set ( $imgAlt = ' alt="' + $alt + '"' )
    #else
      #set ( $imgAlt = ' alt=""' )
    #end
    #if( $border )
      #set ( $imgBorder = ' border="' + $border + '"' )
    #else
      #set ( $imgBorder = "" )
    #end
    #if( $width )
      #set ( $imgWidth = ' width="' + $width + '"' )
    #else
      #set ( $imgWidth = "" )
    #end
    #if( $height )

 =======================================================================
 ==src/site/resources/css/freebsd_docbook.css
 =======================================================================
/*
 * Copyright (c) 2001, 2003, 2010 The FreeBSD Documentation Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: doc/share/misc/docbook.css,v 1.15 2010/03/20 04:15:01 hrs Exp $
 */

BODY ADDRESS {
	line-height: 1.3;
	margin: .6em 0;
}

BODY BLOCKQUOTE {
	margin-top: .75em;
	line-height: 1.5;
	margin-bottom: .75em;
}

HTML BODY {
	margin: 1em 8% 1em 10%;
	line-height: 1.2;
}

.LEGALNOTICE {
	font-size: small;
	font-variant: small-caps;
}

BODY DIV {

 =======================================================================
 ==src/docbkx/build.xml
 =======================================================================
<?xml version="1.0"?>
    <chapter xml:id="build"
      version="5.0" xmlns="http://docbook.org/ns/docbook"
      xmlns:xlink="http://www.w3.org/1999/xlink"
      xmlns:xi="http://www.w3.org/2001/XInclude"
      xmlns:svg="http://www.w3.org/2000/svg"
      xmlns:m="http://www.w3.org/1998/Math/MathML"
      xmlns:html="http://www.w3.org/1999/xhtml"
      xmlns:db="http://docbook.org/ns/docbook">
    <title>Building HBase</title>
    <section xml:id="mvn_repo">
        <title>Adding an HBase release to Apache's Maven Repository</title>
        <para>Follow the instructions at
        <link xlink:href="http://www.apache.org/dev/publishing-maven-artifacts.html">Publishing Maven Artifacts</link>.
            The 'trick' to making it all work is answering the questions put to you by the mvn release plugin properly
            and making sure it is using the actual branch.  AND, VERY IMPORTANT: before doing the
            <command>mvn release:perform</command> step, hand edit the release.properties file that was put under
            <varname>${HBASE_HOME}</varname> by the previous step, <command>release:prepare</command>. You need to
            edit it to make it point at the right locations in SVN.
        </para>
        <para>If you run into the below, it's because you need to edit the version in the pom.xml and add
        <code>-SNAPSHOT</code> to it (and commit).
        <programlisting>[INFO] Scanning for projects...
[INFO] Searching repository for plugin with prefix: 'release'.
[INFO] ------------------------------------------------------------------------
[INFO] Building HBase
[INFO]    task-segment: [release:prepare] (aggregator-style)
[INFO] ------------------------------------------------------------------------
[INFO] [release:prepare {execution: default-cli}]
[INFO] ------------------------------------------------------------------------
[ERROR] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] You don't have a SNAPSHOT project in the reactor projects list.
[INFO] ------------------------------------------------------------------------
[INFO] For more information, run Maven with the -e switch
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 3 seconds
[INFO] Finished at: Sat Mar 26 18:11:07 PDT 2011
[INFO] Final Memory: 35M/423M
[INFO] -----------------------------------------------------------------------</programlisting>
        </para>
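        <para>For example -- a sketch, with a placeholder version string, not taken
        from any actual release -- the <code>version</code> element in the pom.xml
        should carry the <code>-SNAPSHOT</code> suffix before
        <command>release:prepare</command> is retried:
        <programlisting>&lt;version&gt;X.Y.Z-SNAPSHOT&lt;/version&gt;</programlisting>
        </para>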
    </section>
          
    </chapter>

 =======================================================================
 ==src/docbkx/configuration.xml
 =======================================================================
<?xml version="1.0"?>
  <chapter xml:id="configuration"
      version="5.0" xmlns="http://docbook.org/ns/docbook"
      xmlns:xlink="http://www.w3.org/1999/xlink"
      xmlns:xi="http://www.w3.org/2001/XInclude"
      xmlns:svg="http://www.w3.org/2000/svg"
      xmlns:m="http://www.w3.org/1998/Math/MathML"
      xmlns:html="http://www.w3.org/1999/xhtml"
      xmlns:db="http://docbook.org/ns/docbook">
    <title>Configuration</title>
    <para>
        HBase uses the same configuration system as Hadoop.
        To configure a deploy, edit a file of environment variables
        in <filename>conf/hbase-env.sh</filename> -- this configuration
        is used mostly by the launcher shell scripts getting the cluster
        off the ground -- and then add configuration to an XML file to
        do things like override HBase defaults, tell HBase what Filesystem to
        use, and the location of the ZooKeeper ensemble
        <footnote>
<para>
Be careful editing XML.  Make sure you close all elements.
Run your file through <command>xmllint</command> or similar
to ensure well-formedness of your document after an edit session.
</para>
        </footnote>
        .
    </para>

    <para>When running in distributed mode, after you make
    an edit to an HBase configuration, make sure you copy the
    content of the <filename>conf</filename> directory to
    all nodes of the cluster.  HBase will not do this for you.
    Use <command>rsync</command>.</para>
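    <para>A minimal sketch of such a copy (the hostname and path are
    placeholders); repeat for each node of the cluster:
    <programlisting>$ rsync -az conf/ somenode.example.org:/path/to/hbase/conf/</programlisting>
    </para>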


    <section xml:id="hbase.site">
    <title><filename>hbase-site.xml</filename> and <filename>hbase-default.xml</filename></title>
    <para>Just as in Hadoop where you add site-specific HDFS configuration
    to the <filename>hdfs-site.xml</filename> file,
    for HBase, site specific customizations go into
    the file <filename>conf/hbase-site.xml</filename>.
    For the list of configurable properties, see
    <xref linkend="hbase_default_configurations" />
    below or view the raw <filename>hbase-default.xml</filename>
    source file in the HBase source code at
    <filename>src/main/resources</filename>.
    </para>
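    <para>A sketch of one such customization (the property name is real; the
    value is a placeholder):
    <programlisting>&lt;property&gt;
  &lt;name&gt;hbase.rootdir&lt;/name&gt;
  &lt;value&gt;hdfs://namenode.example.org:8020/hbase&lt;/value&gt;
&lt;/property&gt;</programlisting>
    </para>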
    <para>
    Not all configuration options make it out to
    <filename>hbase-default.xml</filename>.  Configuration

 =======================================================================
 ==src/docbkx/getting_started.xml
 =======================================================================
<?xml version="1.0" encoding="UTF-8"?>
<chapter version="5.0" xml:id="getting_started"
         xmlns="http://docbook.org/ns/docbook"
         xmlns:xlink="http://www.w3.org/1999/xlink"
         xmlns:xi="http://www.w3.org/2001/XInclude"
         xmlns:svg="http://www.w3.org/2000/svg"
         xmlns:m="http://www.w3.org/1998/Math/MathML"
         xmlns:html="http://www.w3.org/1999/xhtml"
         xmlns:db="http://docbook.org/ns/docbook">
  <title>Getting Started</title>

  <section>
    <title>Introduction</title>

    <para><xref linkend="quickstart" /> will get you up and
    running on a single-node instance of HBase using the local filesystem. The
    <xref linkend="notsoquick" /> describes setup
    of HBase in distributed mode running on top of HDFS.</para>
  </section>

  <section xml:id="quickstart">
    <title>Quick Start</title>

    <para>This guide describes setup of a standalone HBase instance that uses
    the local filesystem. It leads you through creating a table, inserting
    rows via the HBase <command>shell</command>, and then cleaning
    up and shutting down your standalone HBase instance. The below exercise
    should take no more than ten minutes (not including download time).</para>

    <section>
      <title>Download and unpack the latest stable release.</title>

      <para>Choose a download site from this list of <link
      xlink:href="http://www.apache.org/dyn/closer.cgi/hbase/">Apache Download
      Mirrors</link>. Click on suggested top link. This will take you to a
      mirror of <emphasis>HBase Releases</emphasis>. Click on the folder named
      <filename>stable</filename> and then download the file that ends in
      <filename>.tar.gz</filename> to your local filesystem; e.g.
      <filename>hbase-<?eval ${project.version}?>.tar.gz</filename>.</para>

      <para>Decompress and untar your download and then change into the
      unpacked directory.</para>

      <para><programlisting>$ tar xfz hbase-<?eval ${project.version}?>.tar.gz
$ cd hbase-<?eval ${project.version}?>
</programlisting></para>

      <para>At this point, you are ready to start HBase. But before starting
      it, you might want to edit <filename>conf/hbase-site.xml</filename> and
      set the directory you want HBase to write to,

 =======================================================================
 ==src/docbkx/preface.xml
 =======================================================================
<?xml version="1.0" encoding="UTF-8"?>
<preface version="5.0" xml:id="preface" xmlns="http://docbook.org/ns/docbook"
         xmlns:xlink="http://www.w3.org/1999/xlink"
         xmlns:xi="http://www.w3.org/2001/XInclude"
         xmlns:svg="http://www.w3.org/2000/svg"
         xmlns:m="http://www.w3.org/1998/Math/MathML"
         xmlns:html="http://www.w3.org/1999/xhtml"
         xmlns:db="http://docbook.org/ns/docbook">
  <title>Preface</title>

  <para>This book aims to be the official guide for the <link
  xlink:href="http://hbase.apache.org/">HBase</link> version it ships with.
  This document describes HBase version <emphasis><?eval ${project.version}?></emphasis>.
  Herein you will find either the definitive documentation on an HBase topic
  as of its standing when the referenced HBase version shipped, or this book
  will point to the location in <link
  xlink:href="http://hbase.apache.org/docs/current/api/index.html">javadoc</link>,
  <link xlink:href="https://issues.apache.org/jira/browse/HBASE">JIRA</link>
  or <link xlink:href="http://wiki.apache.org/hadoop/Hbase">wiki</link> where
  the pertinent information can be found.</para>

  <para>This book is a work in progress. It is lacking in many areas but we
  hope to fill in the holes with time. Feel free to add to this book by adding
  a patch to an issue up in the HBase <link
  xlink:href="https://issues.apache.org/jira/browse/HBASE">JIRA</link>.</para>

  <note xml:id="headsup">
      <title>Heads-up</title>
      <para>
          If this is your first foray into the wonderful world of
          Distributed Computing, then you are in for
          some interesting times.  First off, distributed systems are
          hard; making a distributed system hum requires a disparate
          skillset that spans systems (hardware and software) and
          networking.  Your cluster's operation can hiccup for any
          of a myriad of reasons: bugs in HBase itself, misconfigurations
          -- of HBase itself but also of the operating system --
          through to hardware problems, whether it be a bug in your network card
          drivers or an underprovisioned RAM bus (to mention two recent
          examples of hardware issues that manifested as "HBase is slow").
          You will also need to do a recalibration if up to this point your
          computing has been bound to a single box.  Here is one good
          starting point:
          <link xlink:href="http://en.wikipedia.org/wiki/Fallacies_of_Distributed_Computing">Fallacies of Distributed Computing</link>.
      </para>
  </note>
</preface>

 =======================================================================
 ==src/docbkx/shell.xml
 =======================================================================
<?xml version="1.0"?>
  <chapter xml:id="shell"
      version="5.0" xmlns="http://docbook.org/ns/docbook"
      xmlns:xlink="http://www.w3.org/1999/xlink"
      xmlns:xi="http://www.w3.org/2001/XInclude"
      xmlns:svg="http://www.w3.org/2000/svg"
      xmlns:m="http://www.w3.org/1998/Math/MathML"
      xmlns:html="http://www.w3.org/1999/xhtml"
      xmlns:db="http://docbook.org/ns/docbook">
    <title>The HBase Shell</title>

    <para>
        The HBase Shell is <link xlink:href="http://jruby.org">(J)Ruby</link>'s
        IRB with some HBase particular commands added.  Anything you can do in
        IRB, you should be able to do in the HBase Shell.</para>
        <para>To run the HBase shell, 
        do as follows:
        <programlisting>$ ./bin/hbase shell</programlisting>
        </para>
            <para>Type <command>help</command> and then <command>&lt;RETURN&gt;</command>
            to see a listing of shell
            commands and options. Browse at least the paragraphs at the end of
            the help emission for the gist of how variables and command
            arguments are entered into the
            HBase shell; in particular note how table names, rows, and
            columns, etc., must be quoted.</para>
        <para>See <xref linkend="shell_exercises" />
            for example basic shell operation.</para>

    <section xml:id="scripting"><title>Scripting</title>
        <para>For examples of scripting HBase, look in the
            HBase <filename>bin</filename> directory.  Look at the files
            that end in <filename>*.rb</filename>.  To run one of these
            files, do as follows:
            <programlisting>$ ./bin/hbase org.jruby.Main PATH_TO_SCRIPT</programlisting>
        </para>
    </section>

    <section xml:id="shell_tricks"><title>Shell Tricks</title>
        <section><title><filename>irbrc</filename></title>
                <para>Create an <filename>.irbrc</filename> file for yourself in your
                    home directory. Add customizations. A useful one is
                    command history so commands are saved across Shell invocations:
                    <programlisting>
                        $ more .irbrc
                        require 'irb/ext/save-history'
                        IRB.conf[:SAVE_HISTORY] = 100
                        IRB.conf[:HISTORY_FILE] = "#{ENV['HOME']}/.irb-save-history"</programlisting>
                See the <application>ruby</application> documentation of
                <filename>.irbrc</filename> to learn about other possible

 =======================================================================
 ==src/docbkx/developer.xml
 =======================================================================
<?xml version="1.0"?>
    <chapter xml:id="build"
      version="5.0" xmlns="http://docbook.org/ns/docbook"
      xmlns:xlink="http://www.w3.org/1999/xlink"
      xmlns:xi="http://www.w3.org/2001/XInclude"
      xmlns:svg="http://www.w3.org/2000/svg"
      xmlns:m="http://www.w3.org/1998/Math/MathML"
      xmlns:html="http://www.w3.org/1999/xhtml"
      xmlns:db="http://docbook.org/ns/docbook">
    <title>Developers</title>
    <section xml:id="ides"> 
        <title>IDEs</title>
        <section xml:id="eclipse">
          <title>Eclipse</title>
          <para>See <link xlink:href="https://issues.apache.org/jira/browse/HBASE-3678">HBASE-3678 Add Eclipse-based Apache Formatter to HBase Wiki</link>
              for an Eclipse formatter to help ensure your code conforms to HBase'y coding conventions.
          The issue includes instructions for loading the attached formatter.</para>
          
        </section>
    </section> 
    <section xml:id="unit.tests"> 
        <title>Unit Tests</title>
        <para>In HBase we use <link xlink:href="http://junit.org">JUnit</link> 4.
            If you need to run miniclusters of HDFS, ZooKeeper, HBase, or MapReduce testing,
            be sure to checkout the <classname>HBaseTestingUtility</classname>.
            Alex Baranau of Sematext describes how it can be used in
            <link xlink:href="http://blog.sematext.com/2010/08/30/hbase-case-study-using-hbasetestingutility-for-local-testing-development/">HBase Case-Study: Using HBaseTestingUtility for Local Testing and Development</link> (2010).
        </para>
        <section xml:id="mockito">
          <title>Mockito</title>
          <para>Sometimes you don't need a full running server for
              unit testing.  For example, some methods can make do with
              a <classname>org.apache.hadoop.hbase.Server</classname> instance
              or an <classname>org.apache.hadoop.hbase.master.MasterServices</classname>
              interface reference rather than a full-blown
              <classname>org.apache.hadoop.hbase.master.HMaster</classname>.
              In these cases, you may be able to get away with a mocked
              <classname>Server</classname> instance.  For example:
              <programlisting>
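// A minimal sketch (an illustration, not code from the HBase tree),
// assuming Mockito is on the test classpath:
Configuration conf = HBaseConfiguration.create();
Server server = Mockito.mock(Server.class);
Mockito.when(server.getConfiguration()).thenReturn(conf);
// Hand the mocked Server to the code under test instead of
// standing up a full HMaster.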
              </programlisting>
           </para>
        </section>
    </section> 
          
    </chapter>

 =======================================================================
 ==src/docbkx/performance.xml
 =======================================================================
<?xml version="1.0" encoding="UTF-8"?>
<chapter version="5.0" xml:id="performance"
         xmlns="http://docbook.org/ns/docbook"
         xmlns:xlink="http://www.w3.org/1999/xlink"
         xmlns:xi="http://www.w3.org/2001/XInclude"
         xmlns:svg="http://www.w3.org/2000/svg"
         xmlns:m="http://www.w3.org/1998/Math/MathML"
         xmlns:html="http://www.w3.org/1999/xhtml"
         xmlns:db="http://docbook.org/ns/docbook">
  <title>Performance Tuning</title>

  <para>Start with the <link
  xlink:href="http://wiki.apache.org/hadoop/PerformanceTuning">wiki
  Performance Tuning</link> page. It has a general discussion of the main
  factors involved; RAM, compression, JVM settings, etc. Afterward, come back
  here for more pointers.</para>

  <note xml:id="rpc.logging"><title>Enabling RPC-level logging</title>
  <para>Enabling the RPC-level logging on a RegionServer can often give
      insight on timings at the server.  Once enabled, the amount of log
      spewed is voluminous.  It is not recommended that you leave this
      logging on for more than short bursts of time.  To enable RPC-level
      logging, browse to the RegionServer UI and click on
      <emphasis>Log Level</emphasis>.  Set the log level to <varname>DEBUG</varname> for the package
      <classname>org.apache.hadoop.ipc</classname> (that's right, for
      hadoop.ipc, NOT hbase.ipc).  Then tail the RegionServer's log.
      Analyze.</para>
  <para>To disable, set the logging level back to <varname>INFO</varname> level.
  </para>
  </note>
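  <para>Equivalently -- a sketch, assuming the stock log4j setup shipped in
  <filename>conf/log4j.properties</filename> -- the same level can be set
  statically (a restart is then required to pick it up):
  <programlisting>log4j.logger.org.apache.hadoop.ipc=DEBUG</programlisting>
  The UI route above is handier since it needs no restart and is easily reverted.</para>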

  <section xml:id="jvm">
    <title>Java</title>

    <section xml:id="gc">
      <title>The Garbage Collector and HBase</title>

      <section xml:id="gcpause">
        <title>Long GC pauses</title>

        <para>In his presentation, <link
        xlink:href="http://www.slideshare.net/cloudera/hbase-hug-presentation">Avoiding
        Full GCs with MemStore-Local Allocation Buffers</link>, Todd Lipcon
        describes two cases of stop-the-world garbage collections common in
        HBase, especially during loading: CMS failure modes and old generation
        heap fragmentation. To address the first, start the CMS
        earlier than default by adding
        <code>-XX:CMSInitiatingOccupancyFraction</code> and setting it down
        from the default. Start at 60 or 70 percent (the lower you bring down the
        threshold, the more GCing is done and the more CPU is used). To address the

 =======================================================================
 ==src/docbkx/upgrading.xml
 =======================================================================
<?xml version="1.0"?>
    <chapter xml:id="upgrading"
      version="5.0" xmlns="http://docbook.org/ns/docbook"
      xmlns:xlink="http://www.w3.org/1999/xlink"
      xmlns:xi="http://www.w3.org/2001/XInclude"
      xmlns:svg="http://www.w3.org/2000/svg"
      xmlns:m="http://www.w3.org/1998/Math/MathML"
      xmlns:html="http://www.w3.org/1999/xhtml"
      xmlns:db="http://docbook.org/ns/docbook">
    <title>Upgrading</title>
    <para>
        Review <xref linkend="requirements" />, in particular the section on Hadoop version.
    </para>
    <section xml:id="upgrade0.90">
    <title>Upgrading to HBase 0.90.x from 0.20.x or 0.89.x</title>
          <para>This version of 0.90.x HBase can be started on data written by
              HBase 0.20.x or HBase 0.89.x.  There is no need for a migration step.
              HBase 0.89.x and 0.90.x do write out the names of region directories
              differently -- they name them with an md5 hash of the region name rather
              than a jenkins hash -- so this means that once started, there is no
              going back to HBase 0.20.x.
          </para>
          <para>
             Be sure to remove the <filename>hbase-default.xml</filename> from
             your <filename>conf</filename>
             directory on upgrade.  A 0.20.x version of this file will have
             sub-optimal configurations for 0.90.x HBase.  The
             <filename>hbase-default.xml</filename> file is now bundled into the
             HBase jar and read from there.  If you would like to review
             the content of this file, see it in the src tree at
             <filename>src/main/resources/hbase-default.xml</filename> or
             see <xref linkend="hbase_default_configurations" />.
          </para>
          <para>
            Finally, if upgrading from 0.20.x, check your 
            <varname>.META.</varname> schema in the shell.  In the past we would
            recommend that users run with a 16kb
            <varname>MEMSTORE_FLUSHSIZE</varname>.
            Run <code>hbase> scan '-ROOT-'</code> in the shell. This will output
            the current <varname>.META.</varname> schema.  Check
            <varname>MEMSTORE_FLUSHSIZE</varname> size.  Is it 16kb (16384)?  If so, you will
            need to change this (The 'normal'/default value is 64MB (67108864)).
            Run the script <filename>bin/set_meta_memstore_size.rb</filename>.
            This will make the necessary edit to your <varname>.META.</varname> schema.
            Failure to run this change will make for a slow cluster <footnote>
            <para>
            See <link xlink:href="https://issues.apache.org/jira/browse/HBASE-3499">HBASE-3499 Users upgrading to 0.90.0 need to have their .META. table updated with the right MEMSTORE_SIZE</link>
            </para>
            </footnote>
            .

 =======================================================================
 ==src/docbkx/troubleshooting.xml
 =======================================================================
<?xml version="1.0" encoding="UTF-8"?>
<chapter version="5.0" xml:id="trouble"
         xmlns="http://docbook.org/ns/docbook"
         xmlns:xlink="http://www.w3.org/1999/xlink"
         xmlns:xi="http://www.w3.org/2001/XInclude"
         xmlns:svg="http://www.w3.org/2000/svg"
         xmlns:m="http://www.w3.org/1998/Math/MathML"
         xmlns:html="http://www.w3.org/1999/xhtml"
         xmlns:db="http://docbook.org/ns/docbook">
  <title>Troubleshooting and Debugging HBase</title>
    <section xml:id="trouble.general">
      <title>General Guidelines</title>
      <para>
          Always start with the master log (TODO: Which lines?).
          Normally it’s just printing the same lines over and over again.
          If not, then there’s an issue.
          Google or <link xlink:href="http://search-hadoop.com">search-hadoop.com</link>
          should return some hits for those exceptions you’re seeing.
      </para>
      <para>
          An error rarely comes alone in HBase; usually when something gets screwed up, what
          follows may be hundreds of exceptions and stack traces coming from all over the place.
          The best way to approach this type of problem is to walk the log back to where it all
          began. For example, one trick with RegionServers is that they will print some
          metrics when aborting, so grepping for <emphasis>Dump</emphasis>
          should get you near the start of the problem.
      </para>
      <para>
          RegionServer suicides are “normal”, as this is what they do when something goes wrong.
          For example, if ulimit and xcievers (the two most important initial settings, see <xref linkend="ulimit" />)
          aren’t changed, it will at some point become impossible for DataNodes to create new threads,
          which from the HBase point of view looks as if HDFS is gone. Think about what would happen if your
          MySQL database was suddenly unable to access files on your local file system; it’s the same with
          HBase and HDFS. Another very common reason to see RegionServers committing seppuku is when they enter
          prolonged garbage collection pauses that last longer than the default ZooKeeper session timeout.
          For more information on GC pauses, see the
          <link xlink:href="http://www.cloudera.com/blog/2011/02/avoiding-full-gcs-in-hbase-with-memstore-local-allocation-buffers-part-1/">3 part blog post</link>  by Todd Lipcon
          and <xref linkend="gcpause" /> above. 
      </para>
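      <para>
          A minimal sketch of that grep (the log name follows the patterns given in the
          Logs section below; &lt;user&gt; and &lt;hostname&gt; are placeholders):
          <programlisting>$ grep -n "Dump" $HBASE_HOME/logs/hbase-&lt;user&gt;-regionserver-&lt;hostname&gt;.log</programlisting>
      </para>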
    </section>
    <section xml:id="trouble.log">
      <title>Logs</title>
      <para>
      The key process logs are as follows...   (replace &lt;user&gt; with the user that started the service, and &lt;hostname&gt; with the machine name)
      </para>
      <para>
      NameNode:  <filename>$HADOOP_HOME/logs/hadoop-&lt;user&gt;-namenode-&lt;hostname&gt;.log</filename>
      </para>
      <para>
      DataNode:  <filename>$HADOOP_HOME/logs/hadoop-&lt;user&gt;-datanode-&lt;hostname&gt;.log</filename>

 =======================================================================
 ==src/assembly/all.xml
 =======================================================================
<?xml version="1.0"?>
<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.1"
          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
          xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.1 http://maven.apache.org/xsd/assembly-1.1.1.xsd">
  <!--This 'all' id is not appended to the produced bundle because we do this:
    http://maven.apache.org/plugins/maven-assembly-plugin/faq.html#required-classifiers
  -->
  <id>all</id>
  <formats>
    <format>tar.gz</format>
  </formats>
  <fileSets>
    <fileSet>
      <includes>
        <include>${basedir}/*.txt</include>
      </includes>
    </fileSet>
    <fileSet>
      <includes>
        <include>pom.xml</include>
      </includes>
    </fileSet>
    <fileSet>
      <directory>src</directory>
    </fileSet>
    <fileSet>
      <directory>conf</directory>
    </fileSet>
    <fileSet>
      <directory>bin</directory>
      <fileMode>755</fileMode>
    </fileSet>
    <fileSet>
      <directory>src/main/ruby</directory>
      <outputDirectory>lib/ruby</outputDirectory>
    </fileSet>
    <fileSet>
      <directory>target</directory>
      <outputDirectory>/</outputDirectory>
      <includes>
          <include>hbase-${project.version}.jar</include>
          <include>hbase-${project.version}-tests.jar</include>
      </includes>
    </fileSet>
    <fileSet>
      <directory>target/hbase-webapps</directory>
      <outputDirectory>hbase-webapps</outputDirectory>
    </fileSet>
    <fileSet>
      <directory>target/site</directory>

 =======================================================================
 ==src/main/ruby/shell/commands/shutdown.rb
 =======================================================================

 =======================================================================
 ==src/main/resources/org/apache/hadoop/hbase/rest/XMLSchema.xsd
 =======================================================================
<?xml version="1.0" encoding="UTF-8"?>
<schema targetNamespace="ModelSchema" elementFormDefault="qualified" xmlns="http://www.w3.org/2001/XMLSchema" xmlns:tns="ModelSchema">

    <element name="Version" type="tns:Version"></element>
    
    <complexType name="Version">
      <attribute name="REST" type="string"></attribute>
      <attribute name="JVM" type="string"></attribute>
      <attribute name="OS" type="string"></attribute>
      <attribute name="Server" type="string"></attribute>
      <attribute name="Jersey" type="string"></attribute>
    </complexType>

    <element name="TableList" type="tns:TableList"></element>
    
    <complexType name="TableList">
        <sequence>
            <element name="table" type="tns:Table" maxOccurs="unbounded" minOccurs="1"></element>
        </sequence>
    </complexType>

    <complexType name="Table">
        <sequence>
            <element name="name" type="string"></element>
        </sequence>
    </complexType>

    <element name="TableInfo" type="tns:TableInfo"></element>
    
    <complexType name="TableInfo">
        <sequence>
            <element name="region" type="tns:TableRegion" maxOccurs="unbounded" minOccurs="1"></element>
        </sequence>
        <attribute name="name" type="string"></attribute>
    </complexType>

    <complexType name="TableRegion">
        <attribute name="name" type="string"></attribute>
        <attribute name="id" type="int"></attribute>
        <attribute name="startKey" type="base64Binary"></attribute>
        <attribute name="endKey" type="base64Binary"></attribute>
        <attribute name="location" type="string"></attribute>
    </complexType>

    <element name="TableSchema" type="tns:TableSchema"></element>
    
    <complexType name="TableSchema">
        <sequence>
            <element name="column" type="tns:ColumnSchema" maxOccurs="unbounded" minOccurs="1"></element>
        </sequence>

 =======================================================================
 ==src/main/resources/org/apache/hadoop/hbase/mapreduce/RowCounter_Counters.properties
 =======================================================================

# ResourceBundle properties file for RowCounter MR job

CounterGroupName=         RowCounter

ROWS.name=                Rows

 =======================================================================
 ==src/main/resources/org/apache/hadoop/hbase/mapred/RowCounter_Counters.properties
 =======================================================================

# ResourceBundle properties file for RowCounter MR job

CounterGroupName=         RowCounter

ROWS.name=                Rows

 =======================================================================
 ==src/main/resources/hbase-webapps/static/hbase.css
 =======================================================================
h1, h2, h3 { color: DarkSlateBlue }
table { border: thin solid DodgerBlue }
tr { border: thin solid DodgerBlue }
td { border: thin solid DodgerBlue }
th { border: thin solid DodgerBlue }
#logo {float: left;}
#logo img {border: none;}
#page_title {padding-top: 27px;}

div.warning {
  border: 1px solid #666;
  background-color: #fcc;
  font-size: 110%;
  font-weight: bold;
}

td.undeployed-region {
  background-color: #faa;
}

 =======================================================================
 ==src/main/resources/hbase-webapps/master/master.jsp
 =======================================================================
<%@ page contentType="text/html;charset=UTF-8"
  import="java.util.*"
  import="org.apache.hadoop.conf.Configuration"
  import="org.apache.hadoop.util.StringUtils"
  import="org.apache.hadoop.hbase.util.Bytes"
  import="org.apache.hadoop.hbase.util.JvmVersion"
  import="org.apache.hadoop.hbase.util.FSUtils"
  import="org.apache.hadoop.hbase.master.HMaster"
  import="org.apache.hadoop.hbase.HConstants"
  import="org.apache.hadoop.hbase.client.HBaseAdmin"
  import="org.apache.hadoop.hbase.client.HConnectionManager"
  import="org.apache.hadoop.hbase.HServerInfo"
  import="org.apache.hadoop.hbase.HServerAddress"
  import="org.apache.hadoop.hbase.HTableDescriptor" %><%
  HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
  Configuration conf = master.getConfiguration();
  HServerAddress rootLocation = master.getCatalogTracker().getRootLocation();
  boolean metaOnline = master.getCatalogTracker().getMetaLocation() != null;
  Map<String, HServerInfo> serverToServerInfos =
    master.getServerManager().getOnlineServers();
  int interval = conf.getInt("hbase.regionserver.msginterval", 1000)/1000;
  if (interval == 0) {
      interval = 1;
  }
  boolean showFragmentation = conf.getBoolean("hbase.master.ui.fragmentation.enabled", false);
  Map<String, Integer> frags = null;
  if (showFragmentation) {
      frags = FSUtils.getTableFragmentation(master);
  }
%><?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" 
  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> 
<html xmlns="http://www.w3.org/1999/xhtml">
<head><meta http-equiv="Content-Type" content="text/html;charset=UTF-8"/>
<title>HBase Master: <%= master.getMasterAddress().getHostname()%>:<%= master.getMasterAddress().getPort() %></title>
<link rel="stylesheet" type="text/css" href="/static/hbase.css" />
</head>
<body>
<a id="logo" href="http://wiki.apache.org/lucene-hadoop/Hbase"><img src="/static/hbase_logo_med.gif" alt="HBase Logo" title="HBase Logo" /></a>
<h1 id="page_title">Master: <%=master.getMasterAddress().getHostname()%>:<%=master.getMasterAddress().getPort()%></h1>
<p id="links_menu"><a href="/logs/">Local logs</a>, <a href="/stacks">Thread Dump</a>, <a href="/logLevel">Log Level</a></p>

<!-- Various warnings that cluster admins should be aware of -->
<% if (JvmVersion.isBadJvmVersion()) { %>
  <div class="warning">
  Your current JVM version <%= System.getProperty("java.version") %> is known to be
  unstable with HBase. Please see the
  <a href="http://wiki.apache.org/hadoop/Hbase/Troubleshooting#A18">HBase wiki</a>
  for details.
  </div>

 =======================================================================
 ==src/main/resources/hbase-webapps/master/table.jsp
 =======================================================================
<%@ page contentType="text/html;charset=UTF-8"
  import="java.util.Map"
  import="org.apache.hadoop.io.Writable"
  import="org.apache.hadoop.conf.Configuration"
  import="org.apache.hadoop.hbase.client.HTable"
  import="org.apache.hadoop.hbase.client.HBaseAdmin"
  import="org.apache.hadoop.hbase.client.HConnectionManager"
  import="org.apache.hadoop.hbase.HRegionInfo"
  import="org.apache.hadoop.hbase.HServerAddress"
  import="org.apache.hadoop.hbase.HServerInfo"
  import="org.apache.hadoop.hbase.io.ImmutableBytesWritable"
  import="org.apache.hadoop.hbase.master.HMaster" 
  import="org.apache.hadoop.hbase.util.Bytes"
  import="org.apache.hadoop.hbase.util.FSUtils"
  import="java.util.Map"
  import="org.apache.hadoop.hbase.HConstants"%><%
  HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
  Configuration conf = master.getConfiguration();
  HBaseAdmin hbadmin = new HBaseAdmin(conf);
  String tableName = request.getParameter("name");
  HTable table = new HTable(conf, tableName);
  String tableHeader = "<h2>Table Regions</h2><table><tr><th>Name</th><th>Region Server</th><th>Start Key</th><th>End Key</th></tr>";
  HServerAddress rl = master.getCatalogTracker().getRootLocation();
  boolean showFragmentation = conf.getBoolean("hbase.master.ui.fragmentation.enabled", false);
  Map<String, Integer> frags = null;
  if (showFragmentation) {
      frags = FSUtils.getTableFragmentation(master);
  }
%>

<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" 
  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> 
<html xmlns="http://www.w3.org/1999/xhtml">

<%
  String action = request.getParameter("action");
  String key = request.getParameter("key");
  if ( action != null ) {
%>
<head><meta http-equiv="Content-Type" content="text/html;charset=UTF-8"/>
<link rel="stylesheet" type="text/css" href="/static/hbase.css" />
<meta http-equiv="refresh" content="5,javascript:history.back()" />
</head>
<body>
<a id="logo" href="http://wiki.apache.org/lucene-hadoop/Hbase"><img src="/static/hbase_logo_med.gif" alt="HBase Logo" title="HBase Logo" /></a>
<h1 id="page_title">Table action request accepted</h1>
<p><hr><p>
<%
  if (action.equals("split")) {

 =======================================================================
 ==src/main/resources/hbase-webapps/master/zk.jsp
 =======================================================================
<%@ page contentType="text/html;charset=UTF-8"
  import="java.io.IOException"
  import="org.apache.hadoop.conf.Configuration"
  import="org.apache.hadoop.hbase.client.HBaseAdmin"
  import="org.apache.hadoop.hbase.client.HConnection"
  import="org.apache.hadoop.hbase.client.HConnectionManager"
  import="org.apache.hadoop.hbase.HRegionInfo"
  import="org.apache.hadoop.hbase.zookeeper.ZKUtil"
  import="org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher"
  import="org.apache.hadoop.hbase.HBaseConfiguration"
  import="org.apache.hadoop.hbase.master.HMaster" 
  import="org.apache.hadoop.hbase.HConstants"%><%
  HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
  Configuration conf = master.getConfiguration();
  HBaseAdmin hbadmin = new HBaseAdmin(conf);
  HConnection connection = hbadmin.getConnection();
  ZooKeeperWatcher watcher = connection.getZooKeeperWatcher();
%>

<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" 
  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> 
<html xmlns="http://www.w3.org/1999/xhtml">
<head><meta http-equiv="Content-Type" content="text/html;charset=UTF-8"/>
<title>ZooKeeper Dump</title>
<link rel="stylesheet" type="text/css" href="/static/hbase.css" />
</head>
<body>
<a id="logo" href="http://hbase.org"><img src="/static/hbase_logo_med.gif" alt="HBase Logo" title="HBase Logo" /></a>
<h1 id="page_title">ZooKeeper Dump</h1>
<p id="links_menu"><a href="/master.jsp">Master</a>, <a href="/logs/">Local logs</a>, <a href="/stacks">Thread Dump</a>, <a href="/logLevel">Log Level</a></p>
<hr id="head_rule" />
<pre>
<%= ZKUtil.dump(watcher) %>
<% HConnectionManager.deleteConnection(hbadmin.getConfiguration(), false); %>
</pre>

</body>
</html>

 =======================================================================
 ==src/main/resources/hbase-webapps/master/index.html
 =======================================================================
<meta HTTP-EQUIV="REFRESH" content="0;url=master.jsp"/>

 =======================================================================
 ==src/main/resources/hbase-webapps/regionserver/index.html
 =======================================================================
<meta HTTP-EQUIV="REFRESH" content="0;url=regionserver.jsp"/>

 =======================================================================
 ==src/main/resources/hbase-webapps/regionserver/regionserver.jsp
 =======================================================================
<%@ page contentType="text/html;charset=UTF-8"
  import="java.util.*"
  import="java.io.IOException"
  import="org.apache.hadoop.io.Text"
  import="org.apache.hadoop.hbase.regionserver.HRegionServer"
  import="org.apache.hadoop.hbase.regionserver.HRegion"
  import="org.apache.hadoop.hbase.regionserver.metrics.RegionServerMetrics"
  import="org.apache.hadoop.hbase.util.Bytes"
  import="org.apache.hadoop.hbase.HConstants"
  import="org.apache.hadoop.hbase.HServerInfo"
  import="org.apache.hadoop.hbase.HServerLoad"
  import="org.apache.hadoop.hbase.HRegionInfo" %><%
  HRegionServer regionServer = (HRegionServer)getServletContext().getAttribute(HRegionServer.REGIONSERVER);
  HServerInfo serverInfo = null;
  try {
    serverInfo = regionServer.getHServerInfo();
  } catch (IOException e) {
    e.printStackTrace();
  }
  RegionServerMetrics metrics = regionServer.getMetrics();
  List<HRegionInfo> onlineRegions = regionServer.getOnlineRegions();
  int interval = regionServer.getConfiguration().getInt("hbase.regionserver.msginterval", 3000)/1000;

%><?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" 
  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> 
<html xmlns="http://www.w3.org/1999/xhtml">
<head><meta http-equiv="Content-Type" content="text/html;charset=UTF-8"/>
<title>HBase Region Server: <%= serverInfo.getServerAddress().getHostname() %>:<%= serverInfo.getServerAddress().getPort() %></title>
<link rel="stylesheet" type="text/css" href="/static/hbase.css" />
</head>

<body>
<a id="logo" href="http://wiki.apache.org/lucene-hadoop/Hbase"><img src="/static/hbase_logo_med.gif" alt="HBase Logo" title="HBase Logo" /></a>
<h1 id="page_title">Region Server: <%= serverInfo.getServerAddress().getHostname() %>:<%= serverInfo.getServerAddress().getPort() %></h1>
<p id="links_menu"><a href="/logs/">Local logs</a>, <a href="/stacks">Thread Dump</a>, <a href="/logLevel">Log Level</a></p>
<hr id="head_rule" />

<h2>Region Server Attributes</h2>
<table>
<tr><th>Attribute Name</th><th>Value</th><th>Description</th></tr>
<tr><td>HBase Version</td><td><%= org.apache.hadoop.hbase.util.VersionInfo.getVersion() %>, r<%= org.apache.hadoop.hbase.util.VersionInfo.getRevision() %></td><td>HBase version and svn revision</td></tr>
<tr><td>HBase Compiled</td><td><%= org.apache.hadoop.hbase.util.VersionInfo.getDate() %>, <%= org.apache.hadoop.hbase.util.VersionInfo.getUser() %></td><td>When HBase version was compiled and by whom</td></tr>
<tr><td>Metrics</td><td><%= metrics.toString() %></td><td>RegionServer Metrics; file and heap sizes are in megabytes</td></tr>
<tr><td>Zookeeper Quorum</td><td><%= regionServer.getZooKeeper().getQuorum() %></td><td>Addresses of all registered ZK servers</td></tr>
</table>

<h2>Online Regions</h2>
<% if (onlineRegions != null && onlineRegions.size() > 0) { %>
<table>
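The scriptlet at the top of this JSP converts hbase.regionserver.msginterval from its stored unit (milliseconds) into seconds for display. A minimal sketch of the same lookup, assuming only the standard Configuration API and the 3000 ms default shown above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MsgInterval {
      public static void main(String[] args) {
        // The key is stored in milliseconds; the JSP divides by 1000
        // to report seconds. The default of 3000 ms matches the page.
        Configuration conf = HBaseConfiguration.create();
        int intervalSeconds =
            conf.getInt("hbase.regionserver.msginterval", 3000) / 1000;
        System.out.println("msginterval = " + intervalSeconds + "s");
      }
    }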

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/StorageClusterStatusMessage.java
 =======================================================================
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: StorageClusterStatusMessage.proto

package org.apache.hadoop.hbase.rest.protobuf.generated;

public final class StorageClusterStatusMessage {
  private StorageClusterStatusMessage() {}
  public static void registerAllExtensions(
      com.google.protobuf.ExtensionRegistry registry) {
  }
  public static final class StorageClusterStatus extends
      com.google.protobuf.GeneratedMessage {
    // Use StorageClusterStatus.newBuilder() to construct.
    private StorageClusterStatus() {
      initFields();
    }
    private StorageClusterStatus(boolean noInit) {}
    
    private static final StorageClusterStatus defaultInstance;
    public static StorageClusterStatus getDefaultInstance() {
      return defaultInstance;
    }
    
    public StorageClusterStatus getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_fieldAccessorTable;
    }
    
    public static final class Region extends
        com.google.protobuf.GeneratedMessage {
      // Use Region.newBuilder() to construct.
      private Region() {
        initFields();
      }
      private Region(boolean noInit) {}
      
      private static final Region defaultInstance;
      public static Region getDefaultInstance() {
        return defaultInstance;
      }
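All of the protobuf excerpts that follow share the same generated skeleton: a private constructor, a singleton default instance, and descriptor accessors. A minimal sketch of reaching that singleton, using the members shown above plus isInitialized(), which is a standard protobuf 2.x message method assumed here rather than shown in the excerpt:

    import org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus;

    public class DefaultInstanceDemo {
      public static void main(String[] args) {
        // getDefaultInstance() returns the shared, immutable empty message.
        StorageClusterStatus empty = StorageClusterStatus.getDefaultInstance();
        System.out.println(empty.isInitialized());
        System.out.println(StorageClusterStatus.getDescriptor().getFullName());
      }
    }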
      

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/ScannerMessage.java
 =======================================================================
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: ScannerMessage.proto

package org.apache.hadoop.hbase.rest.protobuf.generated;

public final class ScannerMessage {
  private ScannerMessage() {}
  public static void registerAllExtensions(
      com.google.protobuf.ExtensionRegistry registry) {
  }
  public static final class Scanner extends
      com.google.protobuf.GeneratedMessage {
    // Use Scanner.newBuilder() to construct.
    private Scanner() {
      initFields();
    }
    private Scanner(boolean noInit) {}
    
    private static final Scanner defaultInstance;
    public static Scanner getDefaultInstance() {
      return defaultInstance;
    }
    
    public Scanner getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Scanner_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Scanner_fieldAccessorTable;
    }
    
    // optional bytes startRow = 1;
    public static final int STARTROW_FIELD_NUMBER = 1;
    private boolean hasStartRow;
    private com.google.protobuf.ByteString startRow_ = com.google.protobuf.ByteString.EMPTY;
    public boolean hasStartRow() { return hasStartRow; }
    public com.google.protobuf.ByteString getStartRow() { return startRow_; }
    
    // optional bytes endRow = 2;
    public static final int ENDROW_FIELD_NUMBER = 2;
    private boolean hasEndRow;
    private com.google.protobuf.ByteString endRow_ = com.google.protobuf.ByteString.EMPTY;
    public boolean hasEndRow() { return hasEndRow; }
    public com.google.protobuf.ByteString getEndRow() { return endRow_; }
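The Scanner message above exposes optional startRow/endRow bytes fields. A minimal round-trip sketch, assuming the setStartRow/setEndRow builder setters, parseFrom, and toByteArray that the protobuf 2.x compiler generates alongside the accessors shown:

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner;

    public class ScannerRoundTrip {
      public static void main(String[] args) throws Exception {
        Scanner scanner = Scanner.newBuilder()
            .setStartRow(ByteString.copyFromUtf8("row-0001"))
            .setEndRow(ByteString.copyFromUtf8("row-9999"))
            .build();

        // Serialize and parse back through the wire format.
        Scanner parsed = Scanner.parseFrom(scanner.toByteArray());
        System.out.println(parsed.hasStartRow());                 // true
        System.out.println(parsed.getStartRow().toStringUtf8());  // row-0001
      }
    }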

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/CellSetMessage.java
 =======================================================================
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: CellSetMessage.proto

package org.apache.hadoop.hbase.rest.protobuf.generated;

public final class CellSetMessage {
  private CellSetMessage() {}
  public static void registerAllExtensions(
      com.google.protobuf.ExtensionRegistry registry) {
  }
  public static final class CellSet extends
      com.google.protobuf.GeneratedMessage {
    // Use CellSet.newBuilder() to construct.
    private CellSet() {
      initFields();
    }
    private CellSet(boolean noInit) {}
    
    private static final CellSet defaultInstance;
    public static CellSet getDefaultInstance() {
      return defaultInstance;
    }
    
    public CellSet getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_fieldAccessorTable;
    }
    
    public static final class Row extends
        com.google.protobuf.GeneratedMessage {
      // Use Row.newBuilder() to construct.
      private Row() {
        initFields();
      }
      private Row(boolean noInit) {}
      
      private static final Row defaultInstance;
      public static Row getDefaultInstance() {
        return defaultInstance;
      }
      

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableSchemaMessage.java
 =======================================================================
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: TableSchemaMessage.proto

package org.apache.hadoop.hbase.rest.protobuf.generated;

public final class TableSchemaMessage {
  private TableSchemaMessage() {}
  public static void registerAllExtensions(
      com.google.protobuf.ExtensionRegistry registry) {
  }
  public static final class TableSchema extends
      com.google.protobuf.GeneratedMessage {
    // Use TableSchema.newBuilder() to construct.
    private TableSchema() {
      initFields();
    }
    private TableSchema(boolean noInit) {}
    
    private static final TableSchema defaultInstance;
    public static TableSchema getDefaultInstance() {
      return defaultInstance;
    }
    
    public TableSchema getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_fieldAccessorTable;
    }
    
    public static final class Attribute extends
        com.google.protobuf.GeneratedMessage {
      // Use Attribute.newBuilder() to construct.
      private Attribute() {
        initFields();
      }
      private Attribute(boolean noInit) {}
      
      private static final Attribute defaultInstance;
      public static Attribute getDefaultInstance() {
        return defaultInstance;
      }
      

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableInfoMessage.java
 =======================================================================
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: TableInfoMessage.proto

package org.apache.hadoop.hbase.rest.protobuf.generated;

public final class TableInfoMessage {
  private TableInfoMessage() {}
  public static void registerAllExtensions(
      com.google.protobuf.ExtensionRegistry registry) {
  }
  public static final class TableInfo extends
      com.google.protobuf.GeneratedMessage {
    // Use TableInfo.newBuilder() to construct.
    private TableInfo() {
      initFields();
    }
    private TableInfo(boolean noInit) {}
    
    private static final TableInfo defaultInstance;
    public static TableInfo getDefaultInstance() {
      return defaultInstance;
    }
    
    public TableInfo getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_fieldAccessorTable;
    }
    
    public static final class Region extends
        com.google.protobuf.GeneratedMessage {
      // Use Region.newBuilder() to construct.
      private Region() {
        initFields();
      }
      private Region(boolean noInit) {}
      
      private static final Region defaultInstance;
      public static Region getDefaultInstance() {
        return defaultInstance;
      }
      

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/ColumnSchemaMessage.java
 =======================================================================
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: ColumnSchemaMessage.proto

package org.apache.hadoop.hbase.rest.protobuf.generated;

public final class ColumnSchemaMessage {
  private ColumnSchemaMessage() {}
  public static void registerAllExtensions(
      com.google.protobuf.ExtensionRegistry registry) {
  }
  public static final class ColumnSchema extends
      com.google.protobuf.GeneratedMessage {
    // Use ColumnSchema.newBuilder() to construct.
    private ColumnSchema() {
      initFields();
    }
    private ColumnSchema(boolean noInit) {}
    
    private static final ColumnSchema defaultInstance;
    public static ColumnSchema getDefaultInstance() {
      return defaultInstance;
    }
    
    public ColumnSchema getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_fieldAccessorTable;
    }
    
    public static final class Attribute extends
        com.google.protobuf.GeneratedMessage {
      // Use Attribute.newBuilder() to construct.
      private Attribute() {
        initFields();
      }
      private Attribute(boolean noInit) {}
      
      private static final Attribute defaultInstance;
      public static Attribute getDefaultInstance() {
        return defaultInstance;
      }
      

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/CellMessage.java
 =======================================================================
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: CellMessage.proto

package org.apache.hadoop.hbase.rest.protobuf.generated;

public final class CellMessage {
  private CellMessage() {}
  public static void registerAllExtensions(
      com.google.protobuf.ExtensionRegistry registry) {
  }
  public static final class Cell extends
      com.google.protobuf.GeneratedMessage {
    // Use Cell.newBuilder() to construct.
    private Cell() {
      initFields();
    }
    private Cell(boolean noInit) {}
    
    private static final Cell defaultInstance;
    public static Cell getDefaultInstance() {
      return defaultInstance;
    }
    
    public Cell getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Cell_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Cell_fieldAccessorTable;
    }
    
    // optional bytes row = 1;
    public static final int ROW_FIELD_NUMBER = 1;
    private boolean hasRow;
    private com.google.protobuf.ByteString row_ = com.google.protobuf.ByteString.EMPTY;
    public boolean hasRow() { return hasRow; }
    public com.google.protobuf.ByteString getRow() { return row_; }
    
    // optional bytes column = 2;
    public static final int COLUMN_FIELD_NUMBER = 2;
    private boolean hasColumn;
    private com.google.protobuf.ByteString column_ = com.google.protobuf.ByteString.EMPTY;
    public boolean hasColumn() { return hasColumn; }
    public com.google.protobuf.ByteString getColumn() { return column_; }
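The Cell message mirrors the Scanner layout with row/column bytes fields. A sketch of stream-based serialization instead of the byte-array round trip, assuming the writeTo(OutputStream)/parseFrom(InputStream) methods standard on protobuf 2.x messages:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell;

    public class CellStreamDemo {
      public static void main(String[] args) throws Exception {
        Cell cell = Cell.newBuilder()
            .setRow(ByteString.copyFromUtf8("r1"))
            .setColumn(ByteString.copyFromUtf8("cf:q"))
            .build();

        ByteArrayOutputStream out = new ByteArrayOutputStream();
        cell.writeTo(out);  // raw (undelimited) wire format
        Cell parsed = Cell.parseFrom(new ByteArrayInputStream(out.toByteArray()));
        System.out.println(parsed.getColumn().toStringUtf8());  // cf:q
      }
    }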

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableListMessage.java
 =======================================================================
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: TableListMessage.proto

package org.apache.hadoop.hbase.rest.protobuf.generated;

public final class TableListMessage {
  private TableListMessage() {}
  public static void registerAllExtensions(
      com.google.protobuf.ExtensionRegistry registry) {
  }
  public static final class TableList extends
      com.google.protobuf.GeneratedMessage {
    // Use TableList.newBuilder() to construct.
    private TableList() {
      initFields();
    }
    private TableList(boolean noInit) {}
    
    private static final TableList defaultInstance;
    public static TableList getDefaultInstance() {
      return defaultInstance;
    }
    
    public TableList getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableList_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableList_fieldAccessorTable;
    }
    
    // repeated string name = 1;
    public static final int NAME_FIELD_NUMBER = 1;
    private java.util.List<java.lang.String> name_ =
      java.util.Collections.emptyList();
    public java.util.List<java.lang.String> getNameList() {
      return name_;
    }
    public int getNameCount() { return name_.size(); }
    public java.lang.String getName(int index) {
      return name_.get(index);
    }
    
    private void initFields() {
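The TableList message carries a single repeated string field. A minimal sketch of populating it, assuming the addName/addAllName repeated-field builder methods that protobuf 2.x generates to match the getNameList/getNameCount/getName accessors shown above:

    import java.util.Arrays;
    import org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList;

    public class TableListDemo {
      public static void main(String[] args) {
        TableList tables = TableList.newBuilder()
            .addName("usertable")
            .addAllName(Arrays.asList("t1", "t2"))
            .build();
        System.out.println(tables.getNameCount());  // 3
        System.out.println(tables.getName(0));      // usertable
        System.out.println(tables.getNameList());   // [usertable, t1, t2]
      }
    }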

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/VersionMessage.java
 =======================================================================
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: VersionMessage.proto

package org.apache.hadoop.hbase.rest.protobuf.generated;

public final class VersionMessage {
  private VersionMessage() {}
  public static void registerAllExtensions(
      com.google.protobuf.ExtensionRegistry registry) {
  }
  public static final class Version extends
      com.google.protobuf.GeneratedMessage {
    // Use Version.newBuilder() to construct.
    private Version() {
      initFields();
    }
    private Version(boolean noInit) {}
    
    private static final Version defaultInstance;
    public static Version getDefaultInstance() {
      return defaultInstance;
    }
    
    public Version getDefaultInstanceForType() {
      return defaultInstance;
    }
    
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Version_descriptor;
    }
    
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Version_fieldAccessorTable;
    }
    
    // optional string restVersion = 1;
    public static final int RESTVERSION_FIELD_NUMBER = 1;
    private boolean hasRestVersion;
    private java.lang.String restVersion_ = "";
    public boolean hasRestVersion() { return hasRestVersion; }
    public java.lang.String getRestVersion() { return restVersion_; }
    
    // optional string jvmVersion = 2;
    public static final int JVMVERSION_FIELD_NUMBER = 2;
    private boolean hasJvmVersion;
    private java.lang.String jvmVersion_ = "";
    public boolean hasJvmVersion() { return hasJvmVersion; }
    public java.lang.String getJvmVersion() { return jvmVersion_; }
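The Version message is all optional string fields. A sketch that builds one and prints it in protobuf text format, assuming the setRestVersion/setJvmVersion builder setters implied by the accessors above and the TextFormat helper from the protobuf 2.x runtime:

    import com.google.protobuf.TextFormat;
    import org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version;

    public class VersionDemo {
      public static void main(String[] args) {
        Version v = Version.newBuilder()
            .setRestVersion("0.0.2")  // illustrative value
            .setJvmVersion(System.getProperty("java.version"))
            .build();
        // Human-readable field dump, handy for debugging REST responses.
        System.out.println(TextFormat.printToString(v));
      }
    }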

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableDisable.java
 =======================================================================

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/avro/hbase.avpr
 =======================================================================
{
  "protocol" : "HBase",
  "namespace" : "org.apache.hadoop.hbase.avro.generated",
  "types" : [ {
    "type" : "record",
    "name" : "AServerAddress",
    "fields" : [ {
      "name" : "hostname",
      "type" : "string"
    }, {
      "name" : "inetSocketAddress",
      "type" : "string"
    }, {
      "name" : "port",
      "type" : "int"
    } ]
  }, {
    "type" : "record",
    "name" : "ARegionLoad",
    "fields" : [ {
      "name" : "memStoreSizeMB",
      "type" : "int"
    }, {
      "name" : "name",
      "type" : "bytes"
    }, {
      "name" : "storefileIndexSizeMB",
      "type" : "int"
    }, {
      "name" : "storefiles",
      "type" : "int"
    }, {
      "name" : "storefileSizeMB",
      "type" : "int"
    }, {
      "name" : "stores",
      "type" : "int"
    } ]
  }, {
    "type" : "record",
    "name" : "AServerLoad",
    "fields" : [ {
      "name" : "load",
      "type" : "int"
    }, {
      "name" : "maxHeapMB",
      "type" : "int"
    }, {
      "name" : "memStoreSizeInMB",
      "type" : "int"

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/avro/generated/AColumnFamilyDescriptor.java
 =======================================================================
package org.apache.hadoop.hbase.avro.generated;

@SuppressWarnings("all")
public class AColumnFamilyDescriptor extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AColumnFamilyDescriptor\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"name\",\"type\":\"bytes\"},{\"name\":\"compression\",\"type\":{\"type\":\"enum\",\"name\":\"ACompressionAlgorithm\",\"symbols\":[\"LZO\",\"GZ\",\"NONE\"]}},{\"name\":\"maxVersions\",\"type\":\"int\"},{\"name\":\"blocksize\",\"type\":\"int\"},{\"name\":\"inMemory\",\"type\":\"boolean\"},{\"name\":\"timeToLive\",\"type\":\"int\"},{\"name\":\"blockCacheEnabled\",\"type\":\"boolean\"},{\"name\":\"bloomfilterEnabled\",\"type\":\"boolean\"}]}");
  public java.nio.ByteBuffer name;
  public org.apache.hadoop.hbase.avro.generated.ACompressionAlgorithm compression;
  public int maxVersions;
  public int blocksize;
  public boolean inMemory;
  public int timeToLive;
  public boolean blockCacheEnabled;
  public boolean bloomfilterEnabled;
  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
  public java.lang.Object get(int field$) {
    switch (field$) {
    case 0: return name;
    case 1: return compression;
    case 2: return maxVersions;
    case 3: return blocksize;
    case 4: return inMemory;
    case 5: return timeToLive;
    case 6: return blockCacheEnabled;
    case 7: return bloomfilterEnabled;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
  @SuppressWarnings(value="unchecked")
  public void put(int field$, java.lang.Object value$) {
    switch (field$) {
    case 0: name = (java.nio.ByteBuffer)value$; break;
    case 1: compression = (org.apache.hadoop.hbase.avro.generated.ACompressionAlgorithm)value$; break;
    case 2: maxVersions = (java.lang.Integer)value$; break;
    case 3: blocksize = (java.lang.Integer)value$; break;
    case 4: inMemory = (java.lang.Boolean)value$; break;
    case 5: timeToLive = (java.lang.Integer)value$; break;
    case 6: blockCacheEnabled = (java.lang.Boolean)value$; break;
    case 7: bloomfilterEnabled = (java.lang.Boolean)value$; break;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
}
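Each of the Avro classes in this report implements SpecificRecord through the positional get/put pair shown above, with field indices fixed by the order in SCHEMA$. A minimal sketch using only those two methods, with the indices taken from AColumnFamilyDescriptor's switch statements (the values are illustrative):

    import java.nio.ByteBuffer;
    import org.apache.hadoop.hbase.avro.generated.AColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.avro.generated.ACompressionAlgorithm;

    public class AvroRecordDemo {
      public static void main(String[] args) {
        AColumnFamilyDescriptor cf = new AColumnFamilyDescriptor();
        cf.put(0, ByteBuffer.wrap("info".getBytes()));  // name
        cf.put(1, ACompressionAlgorithm.NONE);          // compression
        cf.put(2, 3);                                   // maxVersions (autoboxed)
        System.out.println(cf.get(2));                  // 3
      }
    }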

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/avro/generated/AScan.java
 =======================================================================
package org.apache.hadoop.hbase.avro.generated;

@SuppressWarnings("all")
public class AScan extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AScan\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"startRow\",\"type\":[\"bytes\",\"null\"]},{\"name\":\"stopRow\",\"type\":[\"bytes\",\"null\"]},{\"name\":\"columns\",\"type\":[{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"AColumn\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":[\"bytes\",\"null\"]}]}},\"null\"]},{\"name\":\"timestamp\",\"type\":[\"long\",\"null\"]},{\"name\":\"timerange\",\"type\":[{\"type\":\"record\",\"name\":\"ATimeRange\",\"fields\":[{\"name\":\"minStamp\",\"type\":\"long\"},{\"name\":\"maxStamp\",\"type\":\"long\"}]},\"null\"]},{\"name\":\"maxVersions\",\"type\":[\"int\",\"null\"]}]}");
  public java.nio.ByteBuffer startRow;
  public java.nio.ByteBuffer stopRow;
  public org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.AColumn> columns;
  public java.lang.Long timestamp;
  public org.apache.hadoop.hbase.avro.generated.ATimeRange timerange;
  public java.lang.Integer maxVersions;
  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
  public java.lang.Object get(int field$) {
    switch (field$) {
    case 0: return startRow;
    case 1: return stopRow;
    case 2: return columns;
    case 3: return timestamp;
    case 4: return timerange;
    case 5: return maxVersions;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
  @SuppressWarnings(value="unchecked")
  public void put(int field$, java.lang.Object value$) {
    switch (field$) {
    case 0: startRow = (java.nio.ByteBuffer)value$; break;
    case 1: stopRow = (java.nio.ByteBuffer)value$; break;
    case 2: columns = (org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.AColumn>)value$; break;
    case 3: timestamp = (java.lang.Long)value$; break;
    case 4: timerange = (org.apache.hadoop.hbase.avro.generated.ATimeRange)value$; break;
    case 5: maxVersions = (java.lang.Integer)value$; break;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
}

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/avro/generated/AResult.java
 =======================================================================
package org.apache.hadoop.hbase.avro.generated;

@SuppressWarnings("all")
public class AResult extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AResult\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"row\",\"type\":\"bytes\"},{\"name\":\"entries\",\"type\":{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"AResultEntry\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":\"bytes\"},{\"name\":\"value\",\"type\":\"bytes\"},{\"name\":\"timestamp\",\"type\":\"long\"}]}}}]}");
  public java.nio.ByteBuffer row;
  public org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.AResultEntry> entries;
  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
  public java.lang.Object get(int field$) {
    switch (field$) {
    case 0: return row;
    case 1: return entries;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
  @SuppressWarnings(value="unchecked")
  public void put(int field$, java.lang.Object value$) {
    switch (field$) {
    case 0: row = (java.nio.ByteBuffer)value$; break;
    case 1: entries = (org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.AResultEntry>)value$; break;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
}

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/avro/generated/AColumnValue.java
 =======================================================================
package org.apache.hadoop.hbase.avro.generated;

@SuppressWarnings("all")
public class AColumnValue extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AColumnValue\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":\"bytes\"},{\"name\":\"value\",\"type\":\"bytes\"},{\"name\":\"timestamp\",\"type\":[\"long\",\"null\"]}]}");
  public java.nio.ByteBuffer family;
  public java.nio.ByteBuffer qualifier;
  public java.nio.ByteBuffer value;
  public java.lang.Long timestamp;
  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
  public java.lang.Object get(int field$) {
    switch (field$) {
    case 0: return family;
    case 1: return qualifier;
    case 2: return value;
    case 3: return timestamp;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
  @SuppressWarnings(value="unchecked")
  public void put(int field$, java.lang.Object value$) {
    switch (field$) {
    case 0: family = (java.nio.ByteBuffer)value$; break;
    case 1: qualifier = (java.nio.ByteBuffer)value$; break;
    case 2: value = (java.nio.ByteBuffer)value$; break;
    case 3: timestamp = (java.lang.Long)value$; break;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
}

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/avro/generated/AClusterStatus.java
 =======================================================================
package org.apache.hadoop.hbase.avro.generated;

@SuppressWarnings("all")
public class AClusterStatus extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AClusterStatus\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"averageLoad\",\"type\":\"double\"},{\"name\":\"deadServerNames\",\"type\":{\"type\":\"array\",\"items\":\"string\"}},{\"name\":\"deadServers\",\"type\":\"int\"},{\"name\":\"hbaseVersion\",\"type\":\"string\"},{\"name\":\"regionsCount\",\"type\":\"int\"},{\"name\":\"requestsCount\",\"type\":\"int\"},{\"name\":\"serverInfos\",\"type\":{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"AServerInfo\",\"fields\":[{\"name\":\"infoPort\",\"type\":\"int\"},{\"name\":\"load\",\"type\":{\"type\":\"record\",\"name\":\"AServerLoad\",\"fields\":[{\"name\":\"load\",\"type\":\"int\"},{\"name\":\"maxHeapMB\",\"type\":\"int\"},{\"name\":\"memStoreSizeInMB\",\"type\":\"int\"},{\"name\":\"numberOfRegions\",\"type\":\"int\"},{\"name\":\"numberOfRequests\",\"type\":\"int\"},{\"name\":\"regionsLoad\",\"type\":{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"ARegionLoad\",\"fields\":[{\"name\":\"memStoreSizeMB\",\"type\":\"int\"},{\"name\":\"name\",\"type\":\"bytes\"},{\"name\":\"storefileIndexSizeMB\",\"type\":\"int\"},{\"name\":\"storefiles\",\"type\":\"int\"},{\"name\":\"storefileSizeMB\",\"type\":\"int\"},{\"name\":\"stores\",\"type\":\"int\"}]}}},{\"name\":\"storefileIndexSizeInMB\",\"type\":\"int\"},{\"name\":\"storefiles\",\"type\":\"int\"},{\"name\":\"storefileSizeInMB\",\"type\":\"int\"},{\"name\":\"usedHeapMB\",\"type\":\"int\"}]}},{\"name\":\"serverAddress\",\"type\":{\"type\":\"record\",\"name\":\"AServerAddress\",\"fields\":[{\"name\":\"hostname\",\"type\":\"string\"},{\"name\":\"inetSocketAddress\",\"type\":\"string\"},{\"name\":\"port\",\"type\":\"int\"}]}},{\"name\":\"serverName\",\"type\":\"string\"},{\"name\":\"startCode\",\"type\":\"long\"}]}}},{\"name\":\"servers\",\"type\":\"int\"}]}");
  public double averageLoad;
  public org.apache.avro.generic.GenericArray<org.apache.avro.util.Utf8> deadServerNames;
  public int deadServers;
  public org.apache.avro.util.Utf8 hbaseVersion;
  public int regionsCount;
  public int requestsCount;
  public org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.AServerInfo> serverInfos;
  public int servers;
  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
  public java.lang.Object get(int field$) {
    switch (field$) {
    case 0: return averageLoad;
    case 1: return deadServerNames;
    case 2: return deadServers;
    case 3: return hbaseVersion;
    case 4: return regionsCount;
    case 5: return requestsCount;
    case 6: return serverInfos;
    case 7: return servers;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
  @SuppressWarnings(value="unchecked")
  public void put(int field$, java.lang.Object value$) {
    switch (field$) {
    case 0: averageLoad = (java.lang.Double)value$; break;
    case 1: deadServerNames = (org.apache.avro.generic.GenericArray<org.apache.avro.util.Utf8>)value$; break;
    case 2: deadServers = (java.lang.Integer)value$; break;
    case 3: hbaseVersion = (org.apache.avro.util.Utf8)value$; break;
    case 4: regionsCount = (java.lang.Integer)value$; break;
    case 5: requestsCount = (java.lang.Integer)value$; break;
    case 6: serverInfos = (org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.AServerInfo>)value$; break;
    case 7: servers = (java.lang.Integer)value$; break;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
}

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/avro/generated/AServerAddress.java
 =======================================================================
package org.apache.hadoop.hbase.avro.generated;

@SuppressWarnings("all")
public class AServerAddress extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AServerAddress\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"hostname\",\"type\":\"string\"},{\"name\":\"inetSocketAddress\",\"type\":\"string\"},{\"name\":\"port\",\"type\":\"int\"}]}");
  public org.apache.avro.util.Utf8 hostname;
  public org.apache.avro.util.Utf8 inetSocketAddress;
  public int port;
  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
  public java.lang.Object get(int field$) {
    switch (field$) {
    case 0: return hostname;
    case 1: return inetSocketAddress;
    case 2: return port;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
  @SuppressWarnings(value="unchecked")
  public void put(int field$, java.lang.Object value$) {
    switch (field$) {
    case 0: hostname = (org.apache.avro.util.Utf8)value$; break;
    case 1: inetSocketAddress = (org.apache.avro.util.Utf8)value$; break;
    case 2: port = (java.lang.Integer)value$; break;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
}

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/avro/generated/ATableDescriptor.java
 =======================================================================
package org.apache.hadoop.hbase.avro.generated;

@SuppressWarnings("all")
public class ATableDescriptor extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"ATableDescriptor\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"name\",\"type\":\"bytes\"},{\"name\":\"families\",\"type\":[{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"AFamilyDescriptor\",\"fields\":[{\"name\":\"name\",\"type\":\"bytes\"},{\"name\":\"compression\",\"type\":[{\"type\":\"enum\",\"name\":\"ACompressionAlgorithm\",\"symbols\":[\"LZO\",\"GZ\",\"NONE\"]},\"null\"]},{\"name\":\"maxVersions\",\"type\":[\"int\",\"null\"]},{\"name\":\"blocksize\",\"type\":[\"int\",\"null\"]},{\"name\":\"inMemory\",\"type\":[\"boolean\",\"null\"]},{\"name\":\"timeToLive\",\"type\":[\"int\",\"null\"]},{\"name\":\"blockCacheEnabled\",\"type\":[\"boolean\",\"null\"]}]}},\"null\"]},{\"name\":\"maxFileSize\",\"type\":[\"long\",\"null\"]},{\"name\":\"memStoreFlushSize\",\"type\":[\"long\",\"null\"]},{\"name\":\"rootRegion\",\"type\":[\"boolean\",\"null\"]},{\"name\":\"metaRegion\",\"type\":[\"boolean\",\"null\"]},{\"name\":\"metaTable\",\"type\":[\"boolean\",\"null\"]},{\"name\":\"readOnly\",\"type\":[\"boolean\",\"null\"]},{\"name\":\"deferredLogFlush\",\"type\":[\"boolean\",\"null\"]}]}");
  public java.nio.ByteBuffer name;
  public org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.AFamilyDescriptor> families;
  public java.lang.Long maxFileSize;
  public java.lang.Long memStoreFlushSize;
  public java.lang.Boolean rootRegion;
  public java.lang.Boolean metaRegion;
  public java.lang.Boolean metaTable;
  public java.lang.Boolean readOnly;
  public java.lang.Boolean deferredLogFlush;
  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
  public java.lang.Object get(int field$) {
    switch (field$) {
    case 0: return name;
    case 1: return families;
    case 2: return maxFileSize;
    case 3: return memStoreFlushSize;
    case 4: return rootRegion;
    case 5: return metaRegion;
    case 6: return metaTable;
    case 7: return readOnly;
    case 8: return deferredLogFlush;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
  @SuppressWarnings(value="unchecked")
  public void put(int field$, java.lang.Object value$) {
    switch (field$) {
    case 0: name = (java.nio.ByteBuffer)value$; break;
    case 1: families = (org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.AFamilyDescriptor>)value$; break;
    case 2: maxFileSize = (java.lang.Long)value$; break;
    case 3: memStoreFlushSize = (java.lang.Long)value$; break;
    case 4: rootRegion = (java.lang.Boolean)value$; break;
    case 5: metaRegion = (java.lang.Boolean)value$; break;
    case 6: metaTable = (java.lang.Boolean)value$; break;
    case 7: readOnly = (java.lang.Boolean)value$; break;
    case 8: deferredLogFlush = (java.lang.Boolean)value$; break;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
}

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/avro/generated/TCell.java
 =======================================================================
package org.apache.hadoop.hbase.avro.generated;

@SuppressWarnings("all")
public class TCell extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"TCell\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"value\",\"type\":\"bytes\"},{\"name\":\"timestamp\",\"type\":\"long\"}]}");
  public java.nio.ByteBuffer value;
  public long timestamp;
  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
  public java.lang.Object get(int field$) {
    switch (field$) {
    case 0: return value;
    case 1: return timestamp;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
  @SuppressWarnings(value="unchecked")
  public void put(int field$, java.lang.Object value$) {
    switch (field$) {
    case 0: value = (java.nio.ByteBuffer)value$; break;
    case 1: timestamp = (java.lang.Long)value$; break;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
}

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/avro/generated/AMasterNotRunning.java
 =======================================================================
package org.apache.hadoop.hbase.avro.generated;

@SuppressWarnings("all")
public class AMasterNotRunning extends org.apache.avro.specific.SpecificExceptionBase implements org.apache.avro.specific.SpecificRecord {
  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"error\",\"name\":\"AMasterNotRunning\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"message\",\"type\":\"string\"}]}");
  public org.apache.avro.util.Utf8 message;
  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
  public java.lang.Object get(int field$) {
    switch (field$) {
    case 0: return message;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
  @SuppressWarnings(value="unchecked")
  public void put(int field$, java.lang.Object value$) {
    switch (field$) {
    case 0: message = (org.apache.avro.util.Utf8)value$; break;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
}

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/avro/generated/ADelete.java
 =======================================================================
package org.apache.hadoop.hbase.avro.generated;

@SuppressWarnings("all")
public class ADelete extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"ADelete\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"row\",\"type\":\"bytes\"},{\"name\":\"columns\",\"type\":[{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"AColumn\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":[\"bytes\",\"null\"]}]}},\"null\"]}]}");
  public java.nio.ByteBuffer row;
  public org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.AColumn> columns;
  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
  public java.lang.Object get(int field$) {
    switch (field$) {
    case 0: return row;
    case 1: return columns;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
  @SuppressWarnings(value="unchecked")
  public void put(int field$, java.lang.Object value$) {
    switch (field$) {
    case 0: row = (java.nio.ByteBuffer)value$; break;
    case 1: columns = (org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.AColumn>)value$; break;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
}

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/avro/generated/AFamilyDescriptor.java
 =======================================================================
package org.apache.hadoop.hbase.avro.generated;

@SuppressWarnings("all")
public class AFamilyDescriptor extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AFamilyDescriptor\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"name\",\"type\":\"bytes\"},{\"name\":\"compression\",\"type\":[{\"type\":\"enum\",\"name\":\"ACompressionAlgorithm\",\"symbols\":[\"LZO\",\"GZ\",\"NONE\"]},\"null\"]},{\"name\":\"maxVersions\",\"type\":[\"int\",\"null\"]},{\"name\":\"blocksize\",\"type\":[\"int\",\"null\"]},{\"name\":\"inMemory\",\"type\":[\"boolean\",\"null\"]},{\"name\":\"timeToLive\",\"type\":[\"int\",\"null\"]},{\"name\":\"blockCacheEnabled\",\"type\":[\"boolean\",\"null\"]}]}");
  public java.nio.ByteBuffer name;
  public org.apache.hadoop.hbase.avro.generated.ACompressionAlgorithm compression;
  public java.lang.Integer maxVersions;
  public java.lang.Integer blocksize;
  public java.lang.Boolean inMemory;
  public java.lang.Integer timeToLive;
  public java.lang.Boolean blockCacheEnabled;
  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
  public java.lang.Object get(int field$) {
    switch (field$) {
    case 0: return name;
    case 1: return compression;
    case 2: return maxVersions;
    case 3: return blocksize;
    case 4: return inMemory;
    case 5: return timeToLive;
    case 6: return blockCacheEnabled;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
  @SuppressWarnings(value="unchecked")
  public void put(int field$, java.lang.Object value$) {
    switch (field$) {
    case 0: name = (java.nio.ByteBuffer)value$; break;
    case 1: compression = (org.apache.hadoop.hbase.avro.generated.ACompressionAlgorithm)value$; break;
    case 2: maxVersions = (java.lang.Integer)value$; break;
    case 3: blocksize = (java.lang.Integer)value$; break;
    case 4: inMemory = (java.lang.Boolean)value$; break;
    case 5: timeToLive = (java.lang.Integer)value$; break;
    case 6: blockCacheEnabled = (java.lang.Boolean)value$; break;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
}

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/avro/generated/AIllegalArgument.java
 =======================================================================
package org.apache.hadoop.hbase.avro.generated;

@SuppressWarnings("all")
public class AIllegalArgument extends org.apache.avro.specific.SpecificExceptionBase implements org.apache.avro.specific.SpecificRecord {
  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"error\",\"name\":\"AIllegalArgument\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"message\",\"type\":\"string\"}]}");
  public org.apache.avro.util.Utf8 message;
  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
  public java.lang.Object get(int field$) {
    switch (field$) {
    case 0: return message;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
  @SuppressWarnings(value="unchecked")
  public void put(int field$, java.lang.Object value$) {
    switch (field$) {
    case 0: message = (org.apache.avro.util.Utf8)value$; break;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
}

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/avro/generated/AResultEntry.java
 =======================================================================
package org.apache.hadoop.hbase.avro.generated;

@SuppressWarnings("all")
public class AResultEntry extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AResultEntry\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":\"bytes\"},{\"name\":\"value\",\"type\":\"bytes\"},{\"name\":\"timestamp\",\"type\":\"long\"}]}");
  public java.nio.ByteBuffer family;
  public java.nio.ByteBuffer qualifier;
  public java.nio.ByteBuffer value;
  public long timestamp;
  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
  public java.lang.Object get(int field$) {
    switch (field$) {
    case 0: return family;
    case 1: return qualifier;
    case 2: return value;
    case 3: return timestamp;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
  @SuppressWarnings(value="unchecked")
  public void put(int field$, java.lang.Object value$) {
    switch (field$) {
    case 0: family = (java.nio.ByteBuffer)value$; break;
    case 1: qualifier = (java.nio.ByteBuffer)value$; break;
    case 2: value = (java.nio.ByteBuffer)value$; break;
    case 3: timestamp = (java.lang.Long)value$; break;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
}

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/avro/generated/AServerLoad.java
 =======================================================================
package org.apache.hadoop.hbase.avro.generated;

@SuppressWarnings("all")
public class AServerLoad extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AServerLoad\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"load\",\"type\":\"int\"},{\"name\":\"maxHeapMB\",\"type\":\"int\"},{\"name\":\"memStoreSizeInMB\",\"type\":\"int\"},{\"name\":\"numberOfRegions\",\"type\":\"int\"},{\"name\":\"numberOfRequests\",\"type\":\"int\"},{\"name\":\"regionsLoad\",\"type\":{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"ARegionLoad\",\"fields\":[{\"name\":\"memStoreSizeMB\",\"type\":\"int\"},{\"name\":\"name\",\"type\":\"bytes\"},{\"name\":\"storefileIndexSizeMB\",\"type\":\"int\"},{\"name\":\"storefiles\",\"type\":\"int\"},{\"name\":\"storefileSizeMB\",\"type\":\"int\"},{\"name\":\"stores\",\"type\":\"int\"}]}}},{\"name\":\"storefileIndexSizeInMB\",\"type\":\"int\"},{\"name\":\"storefiles\",\"type\":\"int\"},{\"name\":\"storefileSizeInMB\",\"type\":\"int\"},{\"name\":\"usedHeapMB\",\"type\":\"int\"}]}");
  public int load;
  public int maxHeapMB;
  public int memStoreSizeInMB;
  public int numberOfRegions;
  public int numberOfRequests;
  public org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.ARegionLoad> regionsLoad;
  public int storefileIndexSizeInMB;
  public int storefiles;
  public int storefileSizeInMB;
  public int usedHeapMB;
  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
  public java.lang.Object get(int field$) {
    switch (field$) {
    case 0: return load;
    case 1: return maxHeapMB;
    case 2: return memStoreSizeInMB;
    case 3: return numberOfRegions;
    case 4: return numberOfRequests;
    case 5: return regionsLoad;
    case 6: return storefileIndexSizeInMB;
    case 7: return storefiles;
    case 8: return storefileSizeInMB;
    case 9: return usedHeapMB;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
  @SuppressWarnings(value="unchecked")
  public void put(int field$, java.lang.Object value$) {
    switch (field$) {
    case 0: load = (java.lang.Integer)value$; break;
    case 1: maxHeapMB = (java.lang.Integer)value$; break;
    case 2: memStoreSizeInMB = (java.lang.Integer)value$; break;
    case 3: numberOfRegions = (java.lang.Integer)value$; break;
    case 4: numberOfRequests = (java.lang.Integer)value$; break;
    case 5: regionsLoad = (org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.ARegionLoad>)value$; break;
    case 6: storefileIndexSizeInMB = (java.lang.Integer)value$; break;
    case 7: storefiles = (java.lang.Integer)value$; break;
    case 8: storefileSizeInMB = (java.lang.Integer)value$; break;
    case 9: usedHeapMB = (java.lang.Integer)value$; break;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
}

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/avro/generated/AServerInfo.java
 =======================================================================
package org.apache.hadoop.hbase.avro.generated;

@SuppressWarnings("all")
public class AServerInfo extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AServerInfo\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"infoPort\",\"type\":\"int\"},{\"name\":\"load\",\"type\":{\"type\":\"record\",\"name\":\"AServerLoad\",\"fields\":[{\"name\":\"load\",\"type\":\"int\"},{\"name\":\"maxHeapMB\",\"type\":\"int\"},{\"name\":\"memStoreSizeInMB\",\"type\":\"int\"},{\"name\":\"numberOfRegions\",\"type\":\"int\"},{\"name\":\"numberOfRequests\",\"type\":\"int\"},{\"name\":\"regionsLoad\",\"type\":{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"ARegionLoad\",\"fields\":[{\"name\":\"memStoreSizeMB\",\"type\":\"int\"},{\"name\":\"name\",\"type\":\"bytes\"},{\"name\":\"storefileIndexSizeMB\",\"type\":\"int\"},{\"name\":\"storefiles\",\"type\":\"int\"},{\"name\":\"storefileSizeMB\",\"type\":\"int\"},{\"name\":\"stores\",\"type\":\"int\"}]}}},{\"name\":\"storefileIndexSizeInMB\",\"type\":\"int\"},{\"name\":\"storefiles\",\"type\":\"int\"},{\"name\":\"storefileSizeInMB\",\"type\":\"int\"},{\"name\":\"usedHeapMB\",\"type\":\"int\"}]}},{\"name\":\"serverAddress\",\"type\":{\"type\":\"record\",\"name\":\"AServerAddress\",\"fields\":[{\"name\":\"hostname\",\"type\":\"string\"},{\"name\":\"inetSocketAddress\",\"type\":\"string\"},{\"name\":\"port\",\"type\":\"int\"}]}},{\"name\":\"serverName\",\"type\":\"string\"},{\"name\":\"startCode\",\"type\":\"long\"}]}");
  public int infoPort;
  public org.apache.hadoop.hbase.avro.generated.AServerLoad load;
  public org.apache.hadoop.hbase.avro.generated.AServerAddress serverAddress;
  public org.apache.avro.util.Utf8 serverName;
  public long startCode;
  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
  public java.lang.Object get(int field$) {
    switch (field$) {
    case 0: return infoPort;
    case 1: return load;
    case 2: return serverAddress;
    case 3: return serverName;
    case 4: return startCode;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
  @SuppressWarnings(value="unchecked")
  public void put(int field$, java.lang.Object value$) {
    switch (field$) {
    case 0: infoPort = (java.lang.Integer)value$; break;
    case 1: load = (org.apache.hadoop.hbase.avro.generated.AServerLoad)value$; break;
    case 2: serverAddress = (org.apache.hadoop.hbase.avro.generated.AServerAddress)value$; break;
    case 3: serverName = (org.apache.avro.util.Utf8)value$; break;
    case 4: startCode = (java.lang.Long)value$; break;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
}

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/avro/generated/AColumn.java
 =======================================================================
package org.apache.hadoop.hbase.avro.generated;

@SuppressWarnings("all")
public class AColumn extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AColumn\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":[\"bytes\",\"null\"]}]}");
  public java.nio.ByteBuffer family;
  public java.nio.ByteBuffer qualifier;
  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
  public java.lang.Object get(int field$) {
    switch (field$) {
    case 0: return family;
    case 1: return qualifier;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
  @SuppressWarnings(value="unchecked")
  public void put(int field$, java.lang.Object value$) {
    switch (field$) {
    case 0: family = (java.nio.ByteBuffer)value$; break;
    case 1: qualifier = (java.nio.ByteBuffer)value$; break;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
}

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/avro/generated/IOError.java
 =======================================================================
package org.apache.hadoop.hbase.avro.generated;

@SuppressWarnings("all")
public class IOError extends org.apache.avro.specific.SpecificExceptionBase implements org.apache.avro.specific.SpecificRecord {
  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"error\",\"name\":\"IOError\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"message\",\"type\":\"string\"}]}");
  public org.apache.avro.util.Utf8 message;
  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
  public java.lang.Object get(int field$) {
    switch (field$) {
    case 0: return message;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
  @SuppressWarnings(value="unchecked")
  public void put(int field$, java.lang.Object value$) {
    switch (field$) {
    case 0: message = (org.apache.avro.util.Utf8)value$; break;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
}

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/avro/generated/APut.java
 =======================================================================
package org.apache.hadoop.hbase.avro.generated;

@SuppressWarnings("all")
public class APut extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"APut\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"row\",\"type\":\"bytes\"},{\"name\":\"columnValues\",\"type\":{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"AColumnValue\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":\"bytes\"},{\"name\":\"value\",\"type\":\"bytes\"},{\"name\":\"timestamp\",\"type\":[\"long\",\"null\"]}]}}}]}");
  public java.nio.ByteBuffer row;
  public org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.AColumnValue> columnValues;
  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
  public java.lang.Object get(int field$) {
    switch (field$) {
    case 0: return row;
    case 1: return columnValues;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
  @SuppressWarnings(value="unchecked")
  public void put(int field$, java.lang.Object value$) {
    switch (field$) {
    case 0: row = (java.nio.ByteBuffer)value$; break;
    case 1: columnValues = (org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.AColumnValue>)value$; break;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
}

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/avro/generated/ACompressionAlgorithm.java
 =======================================================================
package org.apache.hadoop.hbase.avro.generated;

@SuppressWarnings("all")
public enum ACompressionAlgorithm { 
  LZO, GZ, NONE
}

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/avro/generated/ATableExists.java
 =======================================================================
package org.apache.hadoop.hbase.avro.generated;

@SuppressWarnings("all")
public class ATableExists extends org.apache.avro.specific.SpecificExceptionBase implements org.apache.avro.specific.SpecificRecord {
  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"error\",\"name\":\"ATableExists\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"message\",\"type\":\"string\"}]}");
  public org.apache.avro.util.Utf8 message;
  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
  public java.lang.Object get(int field$) {
    switch (field$) {
    case 0: return message;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
  @SuppressWarnings(value="unchecked")
  public void put(int field$, java.lang.Object value$) {
    switch (field$) {
    case 0: message = (org.apache.avro.util.Utf8)value$; break;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
}

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/avro/generated/AIOError.java
 =======================================================================
package org.apache.hadoop.hbase.avro.generated;

@SuppressWarnings("all")
public class AIOError extends org.apache.avro.specific.SpecificExceptionBase implements org.apache.avro.specific.SpecificRecord {
  public static final org.apache.avro.Schema SCHEMA$ =
    org.apache.avro.Schema.parse("{\"type\":\"error\",\"name\":\"AIOError\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"message\",\"type\":\"string\"}]}");
  public org.apache.avro.util.Utf8 message;
  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
  public java.lang.Object get(int field$) {
    switch (field$) {
    case 0: return message;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
  @SuppressWarnings(value="unchecked")
  public void put(int field$, java.lang.Object value$) {
    switch (field$) {
    case 0: message = (org.apache.avro.util.Utf8)value$; break;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
}

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/avro/generated/AAlreadyExists.java
 =======================================================================
package org.apache.hadoop.hbase.avro.generated;

@SuppressWarnings("all")
public class AAlreadyExists extends org.apache.avro.specific.SpecificExceptionBase implements org.apache.avro.specific.SpecificRecord {
  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"error\",\"name\":\"AAlreadyExists\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"message\",\"type\":\"string\"}]}");
  public org.apache.avro.util.Utf8 message;
  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
  public java.lang.Object get(int field$) {
    switch (field$) {
    case 0: return message;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
  @SuppressWarnings(value="unchecked")
  public void put(int field$, java.lang.Object value$) {
    switch (field$) {
    case 0: message = (org.apache.avro.util.Utf8)value$; break;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
}

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/avro/generated/HBase.java
 =======================================================================
package org.apache.hadoop.hbase.avro.generated;

@SuppressWarnings("all")
public interface HBase {
  public static final org.apache.avro.Protocol PROTOCOL = org.apache.avro.Protocol.parse("{\"protocol\":\"HBase\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"types\":[{\"type\":\"record\",\"name\":\"AServerAddress\",\"fields\":[{\"name\":\"hostname\",\"type\":\"string\"},{\"name\":\"inetSocketAddress\",\"type\":\"string\"},{\"name\":\"port\",\"type\":\"int\"}]},{\"type\":\"record\",\"name\":\"ARegionLoad\",\"fields\":[{\"name\":\"memStoreSizeMB\",\"type\":\"int\"},{\"name\":\"name\",\"type\":\"bytes\"},{\"name\":\"storefileIndexSizeMB\",\"type\":\"int\"},{\"name\":\"storefiles\",\"type\":\"int\"},{\"name\":\"storefileSizeMB\",\"type\":\"int\"},{\"name\":\"stores\",\"type\":\"int\"}]},{\"type\":\"record\",\"name\":\"AServerLoad\",\"fields\":[{\"name\":\"load\",\"type\":\"int\"},{\"name\":\"maxHeapMB\",\"type\":\"int\"},{\"name\":\"memStoreSizeInMB\",\"type\":\"int\"},{\"name\":\"numberOfRegions\",\"type\":\"int\"},{\"name\":\"numberOfRequests\",\"type\":\"int\"},{\"name\":\"regionsLoad\",\"type\":{\"type\":\"array\",\"items\":\"ARegionLoad\"}},{\"name\":\"storefileIndexSizeInMB\",\"type\":\"int\"},{\"name\":\"storefiles\",\"type\":\"int\"},{\"name\":\"storefileSizeInMB\",\"type\":\"int\"},{\"name\":\"usedHeapMB\",\"type\":\"int\"}]},{\"type\":\"record\",\"name\":\"AServerInfo\",\"fields\":[{\"name\":\"infoPort\",\"type\":\"int\"},{\"name\":\"load\",\"type\":\"AServerLoad\"},{\"name\":\"serverAddress\",\"type\":\"AServerAddress\"},{\"name\":\"serverName\",\"type\":\"string\"},{\"name\":\"startCode\",\"type\":\"long\"}]},{\"type\":\"record\",\"name\":\"AClusterStatus\",\"fields\":[{\"name\":\"averageLoad\",\"type\":\"double\"},{\"name\":\"deadServerNames\",\"type\":{\"type\":\"array\",\"items\":\"string\"}},{\"name\":\"deadServers\",\"type\":\"int\"},{\"name\":\"hbaseVersion\",\"type\":\"string\"},{\"name\":\"regionsCount\",\"type\":\"int\"},{\"name\":\"requestsCount\",\"type\":\"int\"},{\"name\":\"serverInfos\",\"type\":{\"type\":\"array\",\"items\":\"AServerInfo\"}},{\"name\":\"servers\",\"type\":\"int\"}]},{\"type\":\"enum\",\"name\":\"ACompressionAlgorithm\",\"symbols\":[\"LZO\",\"GZ\",\"NONE\"]},{\"type\":\"record\",\"name\":\"AFamilyDescriptor\",\"fields\":[{\"name\":\"name\",\"type\":\"bytes\"},{\"name\":\"compression\",\"type\":[\"ACompressionAlgorithm\",\"null\"]},{\"name\":\"maxVersions\",\"type\":[\"int\",\"null\"]},{\"name\":\"blocksize\",\"type\":[\"int\",\"null\"]},{\"name\":\"inMemory\",\"type\":[\"boolean\",\"null\"]},{\"name\":\"timeToLive\",\"type\":[\"int\",\"null\"]},{\"name\":\"blockCacheEnabled\",\"type\":[\"boolean\",\"null\"]}]},{\"type\":\"record\",\"name\":\"ATableDescriptor\",\"fields\":[{\"name\":\"name\",\"type\":\"bytes\"},{\"name\":\"families\",\"type\":[{\"type\":\"array\",\"items\":\"AFamilyDescriptor\"},\"null\"]},{\"name\":\"maxFileSize\",\"type\":[\"long\",\"null\"]},{\"name\":\"memStoreFlushSize\",\"type\":[\"long\",\"null\"]},{\"name\":\"rootRegion\",\"type\":[\"boolean\",\"null\"]},{\"name\":\"metaRegion\",\"type\":[\"boolean\",\"null\"]},{\"name\":\"metaTable\",\"type\":[\"boolean\",\"null\"]},{\"name\":\"readOnly\",\"type\":[\"boolean\",\"null\"]},{\"name\":\"deferredLogFlush\",\"type\":[\"boolean\",\"null\"]}]},{\"type\":\"record\",\"name\":\"AColumn\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":[\"bytes\",\"null\"]}]},{\"type\":\"record\",\"name\":\"ATimeRange\",\"fields\":[{\"name\":\"minStamp\",\"type\":\"long\"},{\"name\":\"maxStamp\",\"type\":\"long\"}]},{\"type\":\"record\",\"name\":\"AGet\",\"fields\":[{\"name\":\"row\",\"type\":\"bytes\"},{\"name\":\"columns\",\"type\":[{\"type\":\"array\",\"items\":\"AColumn\"},\"null\"]},{\"name\":\"timestamp\",\"type\":[\"long\",\"null\"]},{\"name\":\"timerange\",\"type\":[\"ATimeRange\",\"null\"]},{\"name\":\"maxVersions\",\"type\":[\"int\",\"null\"]}]},{\"type\":\"record\",\"name\":\"AResultEntry\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":\"bytes\"},{\"name\":\"value\",\"type\":\"bytes\"},{\"name\":\"timestamp\",\"type\":\"long\"}]},{\"type\":\"record\",\"name\":\"AResult\",\"fields\":[{\"name\":\"row\",\"type\":\"bytes\"},{\"name\":\"entries\",\"type\":{\"type\":\"array\",\"items\":\"AResultEntry\"}}]},{\"type\":\"record\",\"name\":\"AColumnValue\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":\"bytes\"},{\"name\":\"value\",\"type\":\"bytes\"},{\"name\":\"timestamp\",\"type\":[\"long\",\"null\"]}]},{\"type\":\"record\",\"name\":\"APut\",\"fields\":[{\"name\":\"row\",\"type\":\"bytes\"},{\"name\":\"columnValues\",\"type\":{\"type\":\"array\",\"items\":\"AColumnValue\"}}]},{\"type\":\"record\",\"name\":\"ADelete\",\"fields\":[{\"name\":\"row\",\"type\":\"bytes\"},{\"name\":\"columns\",\"type\":[{\"type\":\"array\",\"items\":\"AColumn\"},\"null\"]}]},{\"type\":\"record\",\"name\":\"AScan\",\"fields\":[{\"name\":\"startRow\",\"type\":[\"bytes\",\"null\"]},{\"name\":\"stopRow\",\"type\":[\"bytes\",\"null\"]},{\"name\":\"columns\",\"type\":[{\"type\":\"array\",\"items\":\"AColumn\"},\"null\"]},{\"name\":\"timestamp\",\"type\":[\"long\",\"null\"]},{\"name\":\"timerange\",\"type\":[\"ATimeRange\",\"null\"]},{\"name\":\"maxVersions\",\"type\":[\"int\",\"null\"]}]},{\"type\":\"error\",\"name\":\"AIOError\",\"fields\":[{\"name\":\"message\",\"type\":\"string\"}]},{\"type\":\"error\",\"name\":\"AIllegalArgument\",\"fields\":[{\"name\":\"message\",\"type\":\"string\"}]},{\"type\":\"error\",\"name\":\"ATableExists\",\"fields\":[{\"name\":\"message\",\"type\":\"string\"}]},{\"type\":\"error\",\"name\":\"AMasterNotRunning\",\"fields\":[{\"name\":\"message\",\"type\":\"string\"}]}],\"messages\":{\"getHBaseVersion\":{\"request\":[],\"response\":\"string\",\"errors\":[\"AIOError\"]},\"getClusterStatus\":{\"request\":[],\"response\":\"AClusterStatus\",\"errors\":[\"AIOError\"]},\"listTables\":{\"request\":[],\"response\":{\"type\":\"array\",\"items\":\"ATableDescriptor\"},\"errors\":[\"AIOError\"]},\"describeTable\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"}],\"response\":\"ATableDescriptor\",\"errors\":[\"AIOError\"]},\"isTableEnabled\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"}],\"response\":\"boolean\",\"errors\":[\"AIOError\"]},\"tableExists\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"}],\"response\":\"boolean\",\"errors\":[\"AIOError\"]},\"describeFamily\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"},{\"name\":\"family\",\"type\":\"bytes\"}],\"response\":\"AFamilyDescriptor\",\"errors\":[\"AIOError\"]},\"createTable\":{\"request\":[{\"name\":\"table\",\"type\":\"ATableDescriptor\"}],\"response\":\"null\",\"errors\":[\"AIOError\",\"AIllegalArgument\",\"ATableExists\",\"AMasterNotRunning\"]},\"deleteTable\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"}],\"response\":\"null\",\"errors\":[\"AIOError\"]},\"modifyTable\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"},{\"name\":\"tableDescriptor\",\"type\":\"ATableDescriptor\"}],\"response\":\"null\",\"errors\":[\"AIOError\"]},\"enableTable\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"}],\"response\":\"null\",\"errors\":[\"AIOError\"]},\"disableTable\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"}],\"response\":\"null\",\"errors\":[\"AIOError\"]},\"flush\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"}],\"response\":\"null\",\"errors\":[\"AIOError\"]},\"split\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"}],\"response\":\"null\",\"errors\":[\"AIOError\"]},\"addFamily\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"},{\"name\":\"family\",\"type\":\"AFamilyDescriptor\"}],\"response\":\"null\",\"errors\":[\"AIOError\"]},\"deleteFamily\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"},{\"name\":\"family\",\"type\":\"bytes\"}],\"response\":\"null\",\"errors\":[\"AIOError\"]},\"modifyFamily\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"},{\"name\":\"familyName\",\"type\":\"bytes\"},{\"name\":\"familyDescriptor\",\"type\":\"AFamilyDescriptor\"}],\"response\":\"null\",\"errors\":[\"AIOError\"]},\"get\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"},{\"name\":\"get\",\"type\":\"AGet\"}],\"response\":\"AResult\",\"errors\":[\"AIOError\"]},\"exists\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"},{\"name\":\"get\",\"type\":\"AGet\"}],\"response\":\"boolean\",\"errors\":[\"AIOError\"]},\"put\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"},{\"name\":\"put\",\"type\":\"APut\"}],\"response\":\"null\",\"errors\":[\"AIOError\"]},\"delete\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"},{\"name\":\"delete\",\"type\":\"ADelete\"}],\"response\":\"null\",\"errors\":[\"AIOError\"]},\"incrementColumnValue\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"},{\"name\":\"row\",\"type\":\"bytes\"},{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":\"bytes\"},{\"name\":\"amount\",\"type\":\"long\"},{\"name\":\"writeToWAL\",\"type\":\"boolean\"}],\"response\":\"long\",\"errors\":[\"AIOError\"]},\"scannerOpen\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"},{\"name\":\"scan\",\"type\":\"AScan\"}],\"response\":\"int\",\"errors\":[\"AIOError\"]},\"scannerClose\":{\"request\":[{\"name\":\"scannerId\",\"type\":\"int\"}],\"response\":\"null\",\"errors\":[\"AIOError\",\"AIllegalArgument\"]},\"scannerGetRows\":{\"request\":[{\"name\":\"scannerId\",\"type\":\"int\"},{\"name\":\"numberOfRows\",\"type\":\"int\"}],\"response\":{\"type\":\"array\",\"items\":\"AResult\"},\"errors\":[\"AIOError\",\"AIllegalArgument\"]}}}");
  org.apache.avro.util.Utf8 getHBaseVersion()
    throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
  org.apache.hadoop.hbase.avro.generated.AClusterStatus getClusterStatus()
    throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
  org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.ATableDescriptor> listTables()
    throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
  org.apache.hadoop.hbase.avro.generated.ATableDescriptor describeTable(java.nio.ByteBuffer table)
    throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
  boolean isTableEnabled(java.nio.ByteBuffer table)
    throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
  boolean tableExists(java.nio.ByteBuffer table)
    throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
  org.apache.hadoop.hbase.avro.generated.AFamilyDescriptor describeFamily(java.nio.ByteBuffer table, java.nio.ByteBuffer family)
    throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
  java.lang.Void createTable(org.apache.hadoop.hbase.avro.generated.ATableDescriptor table)
    throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError, org.apache.hadoop.hbase.avro.generated.AIllegalArgument, org.apache.hadoop.hbase.avro.generated.ATableExists, org.apache.hadoop.hbase.avro.generated.AMasterNotRunning;
  java.lang.Void deleteTable(java.nio.ByteBuffer table)
    throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
  java.lang.Void modifyTable(java.nio.ByteBuffer table, org.apache.hadoop.hbase.avro.generated.ATableDescriptor tableDescriptor)
    throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
  java.lang.Void enableTable(java.nio.ByteBuffer table)
    throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
  java.lang.Void disableTable(java.nio.ByteBuffer table)
    throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
  java.lang.Void flush(java.nio.ByteBuffer table)
    throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
  java.lang.Void split(java.nio.ByteBuffer table)
    throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
  java.lang.Void addFamily(java.nio.ByteBuffer table, org.apache.hadoop.hbase.avro.generated.AFamilyDescriptor family)
    throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
  java.lang.Void deleteFamily(java.nio.ByteBuffer table, java.nio.ByteBuffer family)
    throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
  java.lang.Void modifyFamily(java.nio.ByteBuffer table, java.nio.ByteBuffer familyName, org.apache.hadoop.hbase.avro.generated.AFamilyDescriptor familyDescriptor)
    throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
  org.apache.hadoop.hbase.avro.generated.AResult get(java.nio.ByteBuffer table, org.apache.hadoop.hbase.avro.generated.AGet get)
    throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
  boolean exists(java.nio.ByteBuffer table, org.apache.hadoop.hbase.avro.generated.AGet get)
    throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
  java.lang.Void put(java.nio.ByteBuffer table, org.apache.hadoop.hbase.avro.generated.APut put)
    throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
  java.lang.Void delete(java.nio.ByteBuffer table, org.apache.hadoop.hbase.avro.generated.ADelete delete)
    throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
  long incrementColumnValue(java.nio.ByteBuffer table, java.nio.ByteBuffer row, java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, long amount, boolean writeToWAL)
    throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
  int scannerOpen(java.nio.ByteBuffer table, org.apache.hadoop.hbase.avro.generated.AScan scan)
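
The HBase interface is the client-facing contract of the Avro gateway; each method maps to a message in PROTOCOL. A minimal client sketch against it, assuming a gateway is listening on localhost:9090 and that the Avro IPC classes bundled with this build are on the classpath; the host, port, table name, and row key are illustrative, not taken from this report:

  import java.net.URL;
  import java.nio.ByteBuffer;
  import org.apache.avro.ipc.HttpTransceiver;
  import org.apache.avro.specific.SpecificRequestor;
  import org.apache.hadoop.hbase.avro.generated.AGet;
  import org.apache.hadoop.hbase.avro.generated.AResult;
  import org.apache.hadoop.hbase.avro.generated.HBase;

  public class AvroClientSketch {
    public static void main(String[] args) throws Exception {
      // Bind the HBase protocol over an HTTP transceiver.
      HttpTransceiver transceiver = new HttpTransceiver(new URL("http://localhost:9090"));
      HBase hbase = (HBase) SpecificRequestor.getClient(HBase.class, transceiver);
      System.out.println("version: " + hbase.getHBaseVersion());

      // Single-row get; AGet is the generated record shown in the next excerpt.
      AGet get = new AGet();
      get.row = ByteBuffer.wrap("row1".getBytes());
      AResult result = hbase.get(ByteBuffer.wrap("mytable".getBytes()), get);
      System.out.println("entries: " + result.entries);
      transceiver.close();
    }
  }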

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/avro/generated/AGet.java
 =======================================================================
package org.apache.hadoop.hbase.avro.generated;

@SuppressWarnings("all")
public class AGet extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AGet\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"row\",\"type\":\"bytes\"},{\"name\":\"columns\",\"type\":[{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"AColumn\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":[\"bytes\",\"null\"]}]}},\"null\"]},{\"name\":\"timestamp\",\"type\":[\"long\",\"null\"]},{\"name\":\"timerange\",\"type\":[{\"type\":\"record\",\"name\":\"ATimeRange\",\"fields\":[{\"name\":\"minStamp\",\"type\":\"long\"},{\"name\":\"maxStamp\",\"type\":\"long\"}]},\"null\"]},{\"name\":\"maxVersions\",\"type\":[\"int\",\"null\"]}]}");
  public java.nio.ByteBuffer row;
  public org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.AColumn> columns;
  public java.lang.Long timestamp;
  public org.apache.hadoop.hbase.avro.generated.ATimeRange timerange;
  public java.lang.Integer maxVersions;
  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
  public java.lang.Object get(int field$) {
    switch (field$) {
    case 0: return row;
    case 1: return columns;
    case 2: return timestamp;
    case 3: return timerange;
    case 4: return maxVersions;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
  @SuppressWarnings(value="unchecked")
  public void put(int field$, java.lang.Object value$) {
    switch (field$) {
    case 0: row = (java.nio.ByteBuffer)value$; break;
    case 1: columns = (org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.AColumn>)value$; break;
    case 2: timestamp = (java.lang.Long)value$; break;
    case 3: timerange = (org.apache.hadoop.hbase.avro.generated.ATimeRange)value$; break;
    case 4: maxVersions = (java.lang.Integer)value$; break;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
}

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/avro/generated/ATimeRange.java
 =======================================================================
package org.apache.hadoop.hbase.avro.generated;

@SuppressWarnings("all")
public class ATimeRange extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"ATimeRange\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"minStamp\",\"type\":\"long\"},{\"name\":\"maxStamp\",\"type\":\"long\"}]}");
  public long minStamp;
  public long maxStamp;
  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
  public java.lang.Object get(int field$) {
    switch (field$) {
    case 0: return minStamp;
    case 1: return maxStamp;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
  @SuppressWarnings(value="unchecked")
  public void put(int field$, java.lang.Object value$) {
    switch (field$) {
    case 0: minStamp = (java.lang.Long)value$; break;
    case 1: maxStamp = (java.lang.Long)value$; break;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
}

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/avro/generated/ARegionLoad.java
 =======================================================================
package org.apache.hadoop.hbase.avro.generated;

@SuppressWarnings("all")
public class ARegionLoad extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
  public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"ARegionLoad\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"memStoreSizeMB\",\"type\":\"int\"},{\"name\":\"name\",\"type\":\"bytes\"},{\"name\":\"storefileIndexSizeMB\",\"type\":\"int\"},{\"name\":\"storefiles\",\"type\":\"int\"},{\"name\":\"storefileSizeMB\",\"type\":\"int\"},{\"name\":\"stores\",\"type\":\"int\"}]}");
  public int memStoreSizeMB;
  public java.nio.ByteBuffer name;
  public int storefileIndexSizeMB;
  public int storefiles;
  public int storefileSizeMB;
  public int stores;
  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
  public java.lang.Object get(int field$) {
    switch (field$) {
    case 0: return memStoreSizeMB;
    case 1: return name;
    case 2: return storefileIndexSizeMB;
    case 3: return storefiles;
    case 4: return storefileSizeMB;
    case 5: return stores;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
  @SuppressWarnings(value="unchecked")
  public void put(int field$, java.lang.Object value$) {
    switch (field$) {
    case 0: memStoreSizeMB = (java.lang.Integer)value$; break;
    case 1: name = (java.nio.ByteBuffer)value$; break;
    case 2: storefileIndexSizeMB = (java.lang.Integer)value$; break;
    case 3: storefiles = (java.lang.Integer)value$; break;
    case 4: storefileSizeMB = (java.lang.Integer)value$; break;
    case 5: stores = (java.lang.Integer)value$; break;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
}

 =======================================================================
 ==src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java
 =======================================================================
package org.apache.hadoop.hbase.filter;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.List;
import java.util.TreeSet;

import org.apache.hadoop.hbase.KeyValue;

/**
 * Filter that returns only cells whose timestamp (version) is
 * in the specified list of timestamps (versions).
 * <p>
 * Note: Use of this filter overrides any time range/time stamp
 * options specified using {@link org.apache.hadoop.hbase.client.Get#setTimeRange(long, long)},
 * {@link org.apache.hadoop.hbase.client.Scan#setTimeRange(long, long)}, {@link org.apache.hadoop.hbase.client.Get#setTimeStamp(long)},
 * or {@link org.apache.hadoop.hbase.client.Scan#setTimeStamp(long)}.
 */
public class TimestampsFilter extends FilterBase {

  TreeSet<Long> timestamps;

  // Used during scans to hint the scan to stop early
  // once the timestamps fall below the minTimeStamp.
  long minTimeStamp = Long.MAX_VALUE;

  /**
   * Used during deserialization. Do not use otherwise.
   */
  public TimestampsFilter() {
    super();
  }

  /**
   * Constructor for filter that retains only those
   * cells whose timestamp (version) is in the specified
   * list of timestamps.
   *
   * @param timestamps list of timestamps (versions) to retain
   */
  public TimestampsFilter(List<Long> timestamps) {
    this.timestamps = new TreeSet<Long>(timestamps);
    init();
  }

  private void init() {
    if (this.timestamps.size() > 0) {
      minTimeStamp = this.timestamps.first();
    }
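
As the class javadoc notes, TimestampsFilter retains only cells whose timestamp is in the given list and supersedes any time range or timestamp set on the Get or Scan. A short usage sketch (the timestamps and the scan itself are illustrative; pass the scan to HTable.getScanner() to run it against a table):

  import java.util.Arrays;
  import org.apache.hadoop.hbase.client.Scan;
  import org.apache.hadoop.hbase.filter.TimestampsFilter;

  public class TimestampsFilterDemo {
    public static void main(String[] args) {
      // Keep only cells written at exactly these versions (timestamps).
      TimestampsFilter filter = new TimestampsFilter(Arrays.asList(10L, 20L, 30L));
      Scan scan = new Scan();
      // Overrides any setTimeRange()/setTimeStamp() already set on the scan.
      scan.setFilter(filter);
    }
  }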

 =======================================================================
 ==src/main/javadoc/org/apache/hadoop/hbase/thrift/doc-files/Hbase.html
 =======================================================================
<html><head>
<link href="style.css" rel="stylesheet" type="text/css"/>
<title>Thrift module: Hbase</title></head><body>
<h1>Thrift module: Hbase</h1>
<table><tr><th>Module</th><th>Services</th><th>Data types</th><th>Constants</th></tr>
<tr>
<td>Hbase</td><td><a href="Hbase.html#Svc_Hbase">Hbase</a><br/>
<ul>
<li><a href="Hbase.html#Fn_Hbase_atomicIncrement">atomicIncrement</a></li>
<li><a href="Hbase.html#Fn_Hbase_compact">compact</a></li>
<li><a href="Hbase.html#Fn_Hbase_createTable">createTable</a></li>
<li><a href="Hbase.html#Fn_Hbase_deleteAll">deleteAll</a></li>
<li><a href="Hbase.html#Fn_Hbase_deleteAllRow">deleteAllRow</a></li>
<li><a href="Hbase.html#Fn_Hbase_deleteAllRowTs">deleteAllRowTs</a></li>
<li><a href="Hbase.html#Fn_Hbase_deleteAllTs">deleteAllTs</a></li>
<li><a href="Hbase.html#Fn_Hbase_deleteTable">deleteTable</a></li>
<li><a href="Hbase.html#Fn_Hbase_disableTable">disableTable</a></li>
<li><a href="Hbase.html#Fn_Hbase_enableTable">enableTable</a></li>
<li><a href="Hbase.html#Fn_Hbase_get">get</a></li>
<li><a href="Hbase.html#Fn_Hbase_getColumnDescriptors">getColumnDescriptors</a></li>
<li><a href="Hbase.html#Fn_Hbase_getRow">getRow</a></li>
<li><a href="Hbase.html#Fn_Hbase_getRowTs">getRowTs</a></li>
<li><a href="Hbase.html#Fn_Hbase_getRowWithColumns">getRowWithColumns</a></li>
<li><a href="Hbase.html#Fn_Hbase_getRowWithColumnsTs">getRowWithColumnsTs</a></li>
<li><a href="Hbase.html#Fn_Hbase_getTableNames">getTableNames</a></li>
<li><a href="Hbase.html#Fn_Hbase_getTableRegions">getTableRegions</a></li>
<li><a href="Hbase.html#Fn_Hbase_getVer">getVer</a></li>
<li><a href="Hbase.html#Fn_Hbase_getVerTs">getVerTs</a></li>
<li><a href="Hbase.html#Fn_Hbase_isTableEnabled">isTableEnabled</a></li>
<li><a href="Hbase.html#Fn_Hbase_majorCompact">majorCompact</a></li>
<li><a href="Hbase.html#Fn_Hbase_mutateRow">mutateRow</a></li>
<li><a href="Hbase.html#Fn_Hbase_mutateRowTs">mutateRowTs</a></li>
<li><a href="Hbase.html#Fn_Hbase_mutateRows">mutateRows</a></li>
<li><a href="Hbase.html#Fn_Hbase_mutateRowsTs">mutateRowsTs</a></li>
<li><a href="Hbase.html#Fn_Hbase_scannerClose">scannerClose</a></li>
<li><a href="Hbase.html#Fn_Hbase_scannerGet">scannerGet</a></li>
<li><a href="Hbase.html#Fn_Hbase_scannerGetList">scannerGetList</a></li>
<li><a href="Hbase.html#Fn_Hbase_scannerOpen">scannerOpen</a></li>
<li><a href="Hbase.html#Fn_Hbase_scannerOpenTs">scannerOpenTs</a></li>
<li><a href="Hbase.html#Fn_Hbase_scannerOpenWithPrefix">scannerOpenWithPrefix</a></li>
<li><a href="Hbase.html#Fn_Hbase_scannerOpenWithStop">scannerOpenWithStop</a></li>
<li><a href="Hbase.html#Fn_Hbase_scannerOpenWithStopTs">scannerOpenWithStopTs</a></li>
</ul>
</td>
<td><a href="Hbase.html#Struct_AlreadyExists">AlreadyExists</a><br/>
<a href="Hbase.html#Struct_BatchMutation">BatchMutation</a><br/>
<a href="Hbase.html#Typedef_Bytes">Bytes</a><br/>
<a href="Hbase.html#Struct_ColumnDescriptor">ColumnDescriptor</a><br/>
<a href="Hbase.html#Struct_IOError">IOError</a><br/>
<a href="Hbase.html#Struct_IllegalArgument">IllegalArgument</a><br/>

 =======================================================================
 ==src/main/javadoc/org/apache/hadoop/hbase/thrift/doc-files/index.html
 =======================================================================
<html><head>
<link href="style.css" rel="stylesheet" type="text/css"/>
<title>All Thrift declarations</title></head><body>
<h1>All Thrift declarations</h1>
<table><tr><th>Module</th><th>Services</th><th>Data types</th><th>Constants</th></tr>
<tr>
<td>Hbase</td><td><a href="Hbase.html#Svc_Hbase">Hbase</a><br/>
<ul>
<li><a href="Hbase.html#Fn_Hbase_atomicIncrement">atomicIncrement</a></li>
<li><a href="Hbase.html#Fn_Hbase_compact">compact</a></li>
<li><a href="Hbase.html#Fn_Hbase_createTable">createTable</a></li>
<li><a href="Hbase.html#Fn_Hbase_deleteAll">deleteAll</a></li>
<li><a href="Hbase.html#Fn_Hbase_deleteAllRow">deleteAllRow</a></li>
<li><a href="Hbase.html#Fn_Hbase_deleteAllRowTs">deleteAllRowTs</a></li>
<li><a href="Hbase.html#Fn_Hbase_deleteAllTs">deleteAllTs</a></li>
<li><a href="Hbase.html#Fn_Hbase_deleteTable">deleteTable</a></li>
<li><a href="Hbase.html#Fn_Hbase_disableTable">disableTable</a></li>
<li><a href="Hbase.html#Fn_Hbase_enableTable">enableTable</a></li>
<li><a href="Hbase.html#Fn_Hbase_get">get</a></li>
<li><a href="Hbase.html#Fn_Hbase_getColumnDescriptors">getColumnDescriptors</a></li>
<li><a href="Hbase.html#Fn_Hbase_getRow">getRow</a></li>
<li><a href="Hbase.html#Fn_Hbase_getRowTs">getRowTs</a></li>
<li><a href="Hbase.html#Fn_Hbase_getRowWithColumns">getRowWithColumns</a></li>
<li><a href="Hbase.html#Fn_Hbase_getRowWithColumnsTs">getRowWithColumnsTs</a></li>
<li><a href="Hbase.html#Fn_Hbase_getTableNames">getTableNames</a></li>
<li><a href="Hbase.html#Fn_Hbase_getTableRegions">getTableRegions</a></li>
<li><a href="Hbase.html#Fn_Hbase_getVer">getVer</a></li>
<li><a href="Hbase.html#Fn_Hbase_getVerTs">getVerTs</a></li>
<li><a href="Hbase.html#Fn_Hbase_isTableEnabled">isTableEnabled</a></li>
<li><a href="Hbase.html#Fn_Hbase_majorCompact">majorCompact</a></li>
<li><a href="Hbase.html#Fn_Hbase_mutateRow">mutateRow</a></li>
<li><a href="Hbase.html#Fn_Hbase_mutateRowTs">mutateRowTs</a></li>
<li><a href="Hbase.html#Fn_Hbase_mutateRows">mutateRows</a></li>
<li><a href="Hbase.html#Fn_Hbase_mutateRowsTs">mutateRowsTs</a></li>
<li><a href="Hbase.html#Fn_Hbase_scannerClose">scannerClose</a></li>
<li><a href="Hbase.html#Fn_Hbase_scannerGet">scannerGet</a></li>
<li><a href="Hbase.html#Fn_Hbase_scannerGetList">scannerGetList</a></li>
<li><a href="Hbase.html#Fn_Hbase_scannerOpen">scannerOpen</a></li>
<li><a href="Hbase.html#Fn_Hbase_scannerOpenTs">scannerOpenTs</a></li>
<li><a href="Hbase.html#Fn_Hbase_scannerOpenWithPrefix">scannerOpenWithPrefix</a></li>
<li><a href="Hbase.html#Fn_Hbase_scannerOpenWithStop">scannerOpenWithStop</a></li>
<li><a href="Hbase.html#Fn_Hbase_scannerOpenWithStopTs">scannerOpenWithStopTs</a></li>
</ul>
</td>
<td><a href="Hbase.html#Struct_AlreadyExists">AlreadyExists</a><br/>
<a href="Hbase.html#Struct_BatchMutation">BatchMutation</a><br/>
<a href="Hbase.html#Typedef_Bytes">Bytes</a><br/>
<a href="Hbase.html#Struct_ColumnDescriptor">ColumnDescriptor</a><br/>
<a href="Hbase.html#Struct_IOError">IOError</a><br/>
<a href="Hbase.html#Struct_IllegalArgument">IllegalArgument</a><br/>

 =======================================================================
 ==src/main/javadoc/org/apache/hadoop/hbase/thrift/doc-files/style.css
 =======================================================================
/* Auto-generated CSS for generated Thrift docs */
body { font-family: Tahoma, sans-serif; }
pre { background-color: #dddddd; padding: 6px; }
h3,h4 { padding-top: 0px; margin-top: 0px; }
div.definition { border: 1px solid gray; margin: 10px; padding: 10px; }
div.extends { margin: -0.5em 0 1em 5em }
table { border: 1px solid grey; border-collapse: collapse; }
td { border: 1px solid grey; padding: 1px 6px; vertical-align: top; }
th { border: 1px solid black; background-color: #bbbbbb;
     text-align: left; padding: 1px 6px; }

 =======================================================================
 ==src/test/ruby/test_helper.rb
 =======================================================================
require 'test/unit'

module Testing
  module Declarative
    # define_test "should do something" do
    #   ...
    # end
    def define_test(name, &block)
      test_name = "test_#{name.gsub(/\s+/,'_')}".to_sym
      defined = instance_method(test_name) rescue false
      raise "#{test_name} is already defined in #{self}" if defined
      if block_given?
        define_method(test_name, &block)
      else
        define_method(test_name) do
          flunk "No implementation provided for #{name}"
        end
      end
    end
  end
end

module Hbase
  module TestHelpers
    def setup_hbase
      @formatter = Shell::Formatter::Console.new()
      @hbase = ::Hbase::Hbase.new($TEST_CLUSTER.getConfiguration)
    end

    def table(table)
      @hbase.table(table, @formatter)
    end

    def admin
      @hbase.admin(@formatter)
    end

    def create_test_table(name)
      # Create the table if needed
      unless admin.exists?(name)
        admin.create name, [{'NAME' => 'x', 'VERSIONS' => 5}, 'y']
        return
      end

      # Enable the table if needed
      unless admin.enabled?(name)
        admin.enable(name)
      end
    end


 =======================================================================
 ==src/test/resources/log4j.properties
 =======================================================================
# Define some default values that can be overridden by system properties
hbase.root.logger=INFO,console
hbase.log.dir=.
hbase.log.file=hbase.log

# Define the root logger to the system property "hbase.root.logger".
log4j.rootLogger=${hbase.root.logger}

# Logging Threshold
log4j.threshold=ALL

#
# Daily Rolling File Appender
#
log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}

# Rollover at midnight
log4j.appender.DRFA.DatePattern=.yyyy-MM-dd

# 30-day backup
#log4j.appender.DRFA.MaxBackupIndex=30
log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout

# Pattern format: Date LogLevel LoggerName LogMessage
#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n

# Debugging Pattern format
log4j.appender.DRFA.layout.ConversionPattern=%d %-5p [%t] %C{2}(%L): %m%n


#
# console
# Add "console" to rootlogger above if you want to use this
#
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.target=System.err
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d %-5p [%t] %C{2}(%L): %m%n

# Custom Logging levels

#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG

log4j.logger.org.apache.hadoop=WARN
log4j.logger.org.apache.zookeeper=ERROR
log4j.logger.org.apache.hadoop.hbase=DEBUG

 =======================================================================
 ==src/test/resources/mapred-queues.xml
 =======================================================================
<?xml version="1.0"?>
<!-- This is the template for queue configuration. The format supports nesting of
     queues within queues - a feature called hierarchical queues. All queues are
     defined within the 'queues' tag which is the top level element for this
     XML document.
     The 'aclsEnabled' attribute should be set to true, if ACLs should be checked
     on queue operations such as submitting jobs, killing jobs etc. -->
<queues aclsEnabled="false">

  <!-- Configuration for a queue is specified by defining a 'queue' element. -->
  <queue>

    <!-- Name of a queue. Queue name cannot contain a ':'  -->
    <name>default</name>

    <!-- properties for a queue, typically used by schedulers,
    can be defined here -->
    <properties>
    </properties>

    <!-- State of the queue. If running, the queue will accept new jobs.
         If stopped, the queue will not accept new jobs. -->
    <state>running</state>

    <!-- Specifies the ACLs to check for submitting jobs to this queue.
         If set to '*', it allows all users to submit jobs to the queue.
         For specifying a list of users and groups the format to use is
         user1,user2 group1,group2 -->
    <acl-submit-job>*</acl-submit-job>

    <!-- Specifies the ACLs to check for modifying jobs in this queue.
         Modifications include killing jobs, tasks of jobs or changing
         priorities.
         If set to '*', it allows all users to modify jobs in this queue.
         For specifying a list of users and groups the format to use is
         user1,user2 group1,group2 -->
    <acl-administer-jobs>*</acl-administer-jobs>
  </queue>

  <!-- Here is a sample of a hierarchical queue configuration
       where q2 is a child of q1. In this example, q2 is a leaf level
       queue as it has no queues configured within it. Currently, ACLs
       and state are only supported for the leaf level queues.
       Note also the usage of properties for the queue q2.
  <queue>
    <name>q1</name>
    <queue>
      <name>q2</name>
      <properties>
        <property key="capacity" value="20"/>

 =======================================================================
 ==src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
 =======================================================================
package org.apache.hadoop.hbase.client.replication;

import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager;
import org.junit.BeforeClass;
import org.junit.Test;

import static org.junit.Assert.fail;
import static org.junit.Assert.assertEquals;

/**
 * Unit testing of ReplicationAdmin
 */
public class TestReplicationAdmin {

  private static final Log LOG =
      LogFactory.getLog(TestReplicationAdmin.class);
  private final static HBaseTestingUtility TEST_UTIL =
      new HBaseTestingUtility();

  private final String ID_ONE = "1";
  private final String KEY_ONE = "127.0.0.1:2181:/hbase";
  private final String ID_SECOND = "2";
  private final String KEY_SECOND = "127.0.0.1:2181:/hbase2";

  private static ReplicationSourceManager manager;
  private static ReplicationAdmin admin;
  private static AtomicBoolean replicating = new AtomicBoolean(true);

  /**
   * @throws java.lang.Exception
   */
  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    TEST_UTIL.startMiniZKCluster();
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
    admin = new ReplicationAdmin(conf);
    Path oldLogDir = new Path(TEST_UTIL.getTestDir(),
        HConstants.HREGION_OLDLOGDIR_NAME);
    Path logDir = new Path(TEST_UTIL.getTestDir(),
        HConstants.HREGION_LOGDIR_NAME);

 =======================================================================
 ==src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java
 =======================================================================
package org.apache.hadoop.hbase.filter;

import static org.junit.Assert.*;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueTestUtil;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Test;

public class TestColumnPrefixFilter {

  private final static HBaseTestingUtility TEST_UTIL = new
      HBaseTestingUtility();

  @Test
  public void testColumnPrefixFilter() throws IOException {
    String family = "Family";
    HTableDescriptor htd = new HTableDescriptor("TestColumnPrefixFilter");
    htd.addFamily(new HColumnDescriptor(family));
    HRegionInfo info = new HRegionInfo(htd, null, null, false);
    HRegion region = HRegion.createHRegion(info, HBaseTestingUtility.
        getTestDir(), TEST_UTIL.getConfiguration());

    List<String> rows = generateRandomWords(100, "row");
    List<String> columns = generateRandomWords(10000, "column");
    long maxTimestamp = 2;

    List<KeyValue> kvList = new ArrayList<KeyValue>();

    Map<String, List<KeyValue>> prefixMap = new HashMap<String,
        List<KeyValue>>();

    prefixMap.put("p", new ArrayList<KeyValue>());
    prefixMap.put("s", new ArrayList<KeyValue>());

 =======================================================================
 ==src/examples/mapreduce/index-builder-setup.rb
 =======================================================================
# Set up sample data for IndexBuilder example
create "people", "attributes"
create "people-email", "INDEX"
create "people-phone", "INDEX"
create "people-name", "INDEX"

[["1", "jenny", "jenny@example.com", "867-5309"],
 ["2", "alice", "alice@example.com", "555-1234"],
 ["3", "kevin", "kevinpet@example.com", "555-1212"]].each do |fields|
  (id, name, email, phone) = *fields
  put "people", id, "attributes:name", name
  put "people", id, "attributes:email", email
  put "people", id, "attributes:phone", phone
end
  

 =======================================================================
 ==partitions_1311186390587
 =======================================================================
[binary SequenceFile data: key class org.apache.hadoop.hbase.io.ImmutableBytesWritable, value class org.apache.hadoop.io.NullWritable, codec org.apache.hadoop.io.compress.DefaultCodec; remaining binary content omitted]

 =======================================================================
 ==partitions_1311317807683
 =======================================================================
[binary SequenceFile data: key class org.apache.hadoop.hbase.io.ImmutableBytesWritable, value class org.apache.hadoop.io.NullWritable, codec org.apache.hadoop.io.compress.DefaultCodec; remaining binary content omitted]

 =======================================================================
 ==pom.xml
 =======================================================================
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
  <modelVersion>4.0.0</modelVersion>

  <parent>
    <groupId>org.apache</groupId>
    <artifactId>apache</artifactId>
    <version>8</version>
  </parent>

  <groupId>org.apache.hbase</groupId>
  <artifactId>hbase</artifactId>
  <packaging>jar</packaging>
  <version>0.90.4</version>
  <name>HBase</name>
  <description>
    HBase is the &lt;a href="http://hadoop.apache.org"&gt;Hadoop&lt;/a&gt; database. Use it when you need
    random, realtime read/write access to your Big Data.
    This project's goal is the hosting of very large tables -- billions of rows X millions of columns -- atop clusters
    of commodity hardware.
  </description>
  <url>http://hbase.apache.org</url>

  <scm>
    <connection>scm:svn:http://svn.apache.org/repos/asf/hbase/tags/0.90.2RC0</connection>
    <developerConnection>scm:svn:https://svn.apache.org/repos/asf/hbase/tags/0.90.2RC0</developerConnection>
    <url>http://svn.apache.org/viewvc/hbase/tags/0.90.2RC0</url>
  </scm>

  <issueManagement>
    <system>JIRA</system>
    <url>http://issues.apache.org/jira/browse/HBASE</url>
  </issueManagement>

  <ciManagement>
    <system>hudson</system>
    <url>http://hudson.zones.apache.org/hudson/view/HBase/job/HBase-TRUNK/</url>
  </ciManagement>

  <mailingLists>
    <mailingList>
      <name>User List</name>
      <subscribe>user-subscribe@hbase.apache.org</subscribe>
      <unsubscribe>user-unsubscribe@hbase.apache.org</unsubscribe>
      <post>user@hbase.apache.org</post>
      <archive>http://mail-archives.apache.org/mod_mbox/hbase-user/</archive>
      <otherArchives>
        <otherArchive>http://dir.gmane.org/gmane.comp.java.hadoop.hbase.user</otherArchive>
        <otherArchive>http://search-hadoop.com/?q=&amp;fc_project=HBase</otherArchive>
      </otherArchives>

 =======================================================================
 ==bin/local-master-backup.sh
 =======================================================================
#!/bin/sh
# This is used for starting multiple masters on the same machine.
# run it from hbase-dir/ just like 'bin/hbase'
# Supports up to 10 masters (limitation = overlapping ports)

bin=`dirname "${BASH_SOURCE-$0}"`
bin=`cd "$bin" >/dev/null && pwd`

if [ $# -lt 2 ]; then
  S=`basename "${BASH_SOURCE-$0}"`
  echo "Usage: $S [start|stop] offset(s)"
  echo ""
  echo "    e.g. $S start 1"
  exit
fi

# sanity check: make sure your master opts don't use ports [e.g. JMX/DBG]
export HBASE_MASTER_OPTS=" "

run_master () {
  DN=$2
  export HBASE_IDENT_STRING="$USER-$DN"
  HBASE_MASTER_ARGS="\
    -D hbase.master.port=`expr 60000 + $DN` \
    -D hbase.master.info.port=`expr 60010 + $DN` \
    --backup"
  "$bin"/hbase-daemon.sh $1 master $HBASE_MASTER_ARGS
}

cmd=$1
shift;

for i in $*
do
  run_master  $cmd $i
done

 =======================================================================
 ==bin/local-regionservers.sh
 =======================================================================
#!/bin/sh
# This is used for starting multiple regionservers on the same machine.
# run it from hbase-dir/ just like 'bin/hbase'
# Supports up to 100 regionservers (limitation = overlapping ports)

bin=`dirname "${BASH_SOURCE-$0}"`
bin=`cd "$bin" >/dev/null && pwd`

if [ $# -lt 2 ]; then
  S=`basename "${BASH_SOURCE-$0}"`
  echo "Usage: $S [start|stop] offset(s)"
  echo ""
  echo "    e.g. $S start 1 2"
  exit
fi

# sanity check: make sure your regionserver opts don't use ports [e.g. JMX/DBG]
export HBASE_REGIONSERVER_OPTS=" "

run_regionserver () {
  DN=$2
  export HBASE_IDENT_STRING="$USER-$DN"
  HBASE_REGIONSERVER_ARGS="\
    -D hbase.regionserver.port=`expr 60200 + $DN` \
    -D hbase.regionserver.info.port=`expr 60300 + $DN`"
  "$bin"/hbase-daemon.sh $1 regionserver $HBASE_REGIONSERVER_ARGS
}

cmd=$1
shift;

for i in $*
do
  run_regionserver  $cmd $i
done

 =======================================================================
 ==bin/set_meta_block_caching.rb
 =======================================================================
# Set in_memory=true and blockcache=true on catalog tables.
# The .META. and -ROOT- tables can be created with caching and
# in_memory set to false.  You want them set to true so that
# these hot tables make it into cache.  To see if the
# .META. table has BLOCKCACHE set, in the shell do the following:
#
#   hbase> scan '-ROOT-'
#
# Look for the 'info' column family and check whether BLOCKCACHE => 'true'.
# If not, run this script and it will set the value to true.
# Setting cache to 'true' will only take effect on region restart
# or if you close the .META. region -- *disruptive* -- and have
# it deploy elsewhere.  This script runs against an up and running
# hbase instance.
# 
# To see usage for this script, run: 
#
#  ${HBASE_HOME}/bin/hbase org.jruby.Main set_meta_block_caching.rb
#
include Java
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.HConstants
import org.apache.hadoop.hbase.HRegionInfo
import org.apache.hadoop.hbase.client.HTable
import org.apache.hadoop.hbase.client.Delete
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.client.Scan
import org.apache.hadoop.hbase.HTableDescriptor
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.util.FSUtils
import org.apache.hadoop.hbase.util.Writables
import org.apache.hadoop.fs.Path
import org.apache.hadoop.fs.FileSystem
import org.apache.commons.logging.LogFactory

# Name of this script
NAME = "set_meta_block_caching.rb"


# Print usage for this script
def usage
  puts 'Usage: %s' % NAME
  exit!
end

# Get configuration to use.
c = HBaseConfiguration.new()

# Set hadoop filesystem configuration using the hbase.rootdir.
# Otherwise, we'll always use localhost though the hbase.rootdir

 =======================================================================
 ==conf/log4j.properties
 =======================================================================
# Define some default values that can be overridden by system properties
hbase.root.logger=INFO,console
hbase.log.dir=.
hbase.log.file=hbase.log

# Define the root logger to the system property "hbase.root.logger".
log4j.rootLogger=${hbase.root.logger}

# Logging Threshold
log4j.threshold=ALL

#
# Daily Rolling File Appender
#
log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}

# Rollover at midnight
log4j.appender.DRFA.DatePattern=.yyyy-MM-dd

# 30-day backup
#log4j.appender.DRFA.MaxBackupIndex=30
log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout

# Pattern format: Date LogLevel LoggerName LogMessage
log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n

# Debugging Pattern format
#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n


#
# console
# Add "console" to rootlogger above if you want to use this 
#
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.target=System.err
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n

# Custom Logging levels

log4j.logger.org.apache.zookeeper=INFO
#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
log4j.logger.org.apache.hadoop.hbase=DEBUG
# Make these two classes INFO-level. Make them DEBUG to see more zk debug.
log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO
log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO
#log4j.logger.org.apache.hadoop.dfs=DEBUG
# Set this class to log INFO only, otherwise it's OTT

 =======================================================================
 ==conf/hadoop-metrics.properties
 =======================================================================
# See http://wiki.apache.org/hadoop/GangliaMetrics
# Make sure you know whether you are using ganglia 3.0 or 3.1.
# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
# And, yes, this file is named hadoop-metrics.properties rather than
# hbase-metrics.properties because we're leveraging the hadoop metrics
# package and hadoop-metrics.properties is a hardcoded name, at least
# for the moment.
#
# See also http://hadoop.apache.org/hbase/docs/current/metrics.html

# Configuration of the "hbase" context for null
hbase.class=org.apache.hadoop.metrics.spi.NullContext

# Configuration of the "hbase" context for file
# hbase.class=org.apache.hadoop.hbase.metrics.file.TimeStampingFileContext
# hbase.period=10
# hbase.fileName=/tmp/metrics_hbase.log

# HBase-specific configuration to reset long-running stats (e.g. compactions)
# If this variable is left out, then the default is no expiration.
hbase.extendedperiod = 3600

# Configuration of the "hbase" context for ganglia
# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
# hbase.period=10
# hbase.servers=GMETADHOST_IP:8649

# Configuration of the "jvm" context for null
jvm.class=org.apache.hadoop.metrics.spi.NullContext

# Configuration of the "jvm" context for file
# jvm.class=org.apache.hadoop.hbase.metrics.file.TimeStampingFileContext
# jvm.period=10
# jvm.fileName=/tmp/metrics_jvm.log

# Configuration of the "jvm" context for ganglia
# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
# jvm.period=10
# jvm.servers=GMETADHOST_IP:8649

# Configuration of the "rpc" context for null
rpc.class=org.apache.hadoop.metrics.spi.NullContext

# Configuration of the "rpc" context for file
# rpc.class=org.apache.hadoop.hbase.metrics.file.TimeStampingFileContext
# rpc.period=10

 =======================================================================
 ==conf/regionservers
 =======================================================================
localhost