Deprecated API


Deprecated Interfaces
org.apache.hadoop.hbase.mapred.TableMap
org.apache.hadoop.hbase.mapred.TableReduce

Deprecated Classes
org.apache.hadoop.hbase.io.hfile.DoubleBlockCache
          As of 1.0, replaced by BucketCache. 
org.apache.hadoop.hbase.mapred.Driver
org.apache.hadoop.hbase.metrics.ExactCounterMetric
org.apache.hadoop.hbase.util.FSTableDescriptorMigrationToSubdir
          Will be removed in the major release after 0.96.
org.apache.hadoop.hbase.mapred.GroupingTableMap
org.apache.hadoop.hbase.mapreduce.HFileOutputFormat
          use HFileOutputFormat2 instead.
org.apache.hadoop.hbase.mapred.HRegionPartitioner
org.apache.hadoop.hbase.client.HTableFactory
          as of 0.98.1. See HConnectionManager.createConnection(Configuration). 
org.apache.hadoop.hbase.client.HTablePool
          as of 0.98.1. See HConnection.getTable(String); a migration sketch follows this list.
org.apache.hadoop.hbase.mapred.IdentityTableMap
org.apache.hadoop.hbase.mapred.IdentityTableReduce
org.apache.hadoop.hbase.catalog.MetaMigrationConvertingToPB
          Will be removed in the major release after 0.96.
org.apache.hadoop.hbase.metrics.histogram.MetricsHistogram
org.apache.hadoop.hbase.metrics.MetricsMBeanBase
org.apache.hadoop.hbase.metrics.MetricsRate
org.apache.hadoop.hbase.metrics.MetricsString
org.apache.hadoop.hbase.metrics.PersistentMetricsTimeVaryingRate
org.apache.hadoop.hbase.mapred.RowCounter
org.apache.hadoop.hbase.io.hfile.slab.SingleSizeCache
          As of 1.0, replaced by BucketCache. 
org.apache.hadoop.hbase.io.hfile.slab.SlabCache
          As of 1.0, replaced by BucketCache. 
org.apache.hadoop.hbase.mapred.TableInputFormat
org.apache.hadoop.hbase.mapred.TableInputFormatBase
org.apache.hadoop.hbase.mapred.TableMapReduceUtil
org.apache.hadoop.hbase.mapred.TableOutputFormat
org.apache.hadoop.hbase.mapred.TableRecordReader
org.apache.hadoop.hbase.mapred.TableRecordReaderImpl
org.apache.hadoop.hbase.mapred.TableSplit
org.apache.hadoop.hbase.zookeeper.ZKLeaderManager
          Not used 
org.apache.hadoop.hbase.zookeeper.ZKUtil.NodeAndData
          Unused 
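
Where an entry above names a replacement, the migration is usually mechanical. As one example, a minimal sketch of moving from HTablePool to the HConnection-based API its entry points at, assuming the 0.98-era client (the table name "t1" is illustrative):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.client.HConnection;
  import org.apache.hadoop.hbase.client.HConnectionManager;
  import org.apache.hadoop.hbase.client.HTableInterface;

  public class PoolMigration {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      // Before (deprecated):
      //   HTablePool pool = new HTablePool(conf, 10);
      //   HTableInterface table = pool.getTable("t1");
      // After: share one heavyweight HConnection and hand out lightweight tables.
      HConnection connection = HConnectionManager.createConnection(conf);
      try {
        HTableInterface table = connection.getTable("t1");
        try {
          // ... reads and writes against the table ...
        } finally {
          table.close(); // cheap; releases the table, not the shared connection
        }
      } finally {
        connection.close(); // close the shared connection at application shutdown
      }
    }
  }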
 

Deprecated Fields
org.apache.hadoop.hbase.HTableDescriptor.DEFERRED_LOG_FLUSH
          Use HTableDescriptor.DURABILITY instead; a migration sketch follows this list.
org.apache.hadoop.hbase.mapreduce.SimpleTotalOrderPartitioner.END
org.apache.hadoop.hbase.HConstants.HBASE_REGIONSERVER_LEASE_PERIOD_KEY
          This config option is deprecated and will be removed in a later release after 0.96.
org.apache.hadoop.hbase.client.Scan.HINT_LOOKAHEAD
          Deprecated without replacement. This is now a no-op; SEEKs and SKIPs are optimized automatically.
org.apache.hadoop.hbase.HConstants.META_TABLE_NAME
org.apache.hadoop.hbase.HTableDescriptor.META_TABLEDESC
          Use TableDescriptors#get(TableName.META_TABLE_NAME) or HBaseAdmin#getTableDescriptor(TableName.META_TABLE_NAME) instead. 
org.apache.hadoop.hbase.regionserver.KeyPrefixRegionSplitPolicy.PREFIX_LENGTH_KEY_DEPRECATED
org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.SNAPSHOT_TIMEOUT_MILLIS_DEFAULT
          Use SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME instead. 
org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.SNAPSHOT_TIMEOUT_MILLIS_KEY
          Use SnapshotDescriptionUtils.MASTER_SNAPSHOT_TIMEOUT_MILLIS instead. 
org.apache.hadoop.hbase.mapreduce.SimpleTotalOrderPartitioner.START
org.apache.hadoop.hbase.HRegionInfo.VERSION
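
For HTableDescriptor.DEFERRED_LOG_FLUSH above, the durability-based replacement looks roughly like this; a minimal sketch assuming the 0.98-era API (the table name "t1" is illustrative):

  import org.apache.hadoop.hbase.HTableDescriptor;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Durability;

  public class DurabilityMigration {
    public static void main(String[] args) {
      HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t1"));
      // Before (deprecated): htd.setDeferredLogFlush(true);
      // After: express deferred log flush as asynchronous WAL durability.
      htd.setDurability(Durability.ASYNC_WAL);
      System.out.println(htd.getDurability()); // prints ASYNC_WAL
    }
  }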
           
 

Deprecated Methods
org.apache.hadoop.hbase.regionserver.wal.WALEdit.add(KeyValue)
          Use WALEdit.add(Cell) instead 
org.apache.hadoop.hbase.client.replication.ReplicationAdmin.addPeer(String, String)
          Use addPeer(String, ReplicationPeerConfig, Map) instead. 
org.apache.hadoop.hbase.client.replication.ReplicationAdmin.addPeer(String, String, String)
org.apache.hadoop.hbase.regionserver.wal.HLog.appendNoSync(HRegionInfo, TableName, WALEdit, List, long, HTableDescriptor, AtomicLong, boolean, long, long)
org.apache.hadoop.hbase.thrift.ThriftServerRunner.HBaseHandler.atomicIncrement(ByteBuffer, ByteBuffer, ByteBuffer, long)
org.apache.hadoop.hbase.client.HTable.batch(List)
          If any exception is thrown by one of the actions, there is no way to retrieve the partially executed results. Use HTable.batch(List, Object[]) instead; a migration sketch follows this list.
org.apache.hadoop.hbase.client.HTableInterface.batch(List)
          If any exception is thrown by one of the actions, there is no way to retrieve the partially executed results. Use HTableInterface.batch(List, Object[]) instead. 
org.apache.hadoop.hbase.client.HTable.batchCallback(List, Batch.Callback)
          If any exception is thrown by one of the actions, there is no way to retrieve the partially executed results. Use HTable.batchCallback(List, Object[], org.apache.hadoop.hbase.client.coprocessor.Batch.Callback) instead. 
org.apache.hadoop.hbase.client.HTableInterface.batchCallback(List, Batch.Callback)
          If any exception is thrown by one of the actions, there is no way to retrieve the partially executed results. Use HTableInterface.batchCallback(List, Object[], org.apache.hadoop.hbase.client.coprocessor.Batch.Callback) instead. 
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder.clearClusterId()
org.apache.hadoop.hbase.client.HConnection.clearRegionCache(byte[])
org.apache.hadoop.hbase.regionserver.Store.compact(CompactionContext, CompactionThroughputController)
          see compact(CompactionContext, CompactionThroughputController, User) 
org.apache.hadoop.hbase.mapreduce.CellCreator.create(byte[], int, int, byte[], int, int, byte[], int, int, long, byte[], int, int, String)
org.apache.hadoop.hbase.mapreduce.CopyTable.createSubmittableJob(Configuration, String[])
          Use CopyTable.createSubmittableJob(String[]) instead 
org.apache.hadoop.hbase.RemoteExceptionHandler.decodeRemoteException(RemoteException)
          Use RemoteException.unwrapRemoteException() instead. In fact we should look into deprecating this whole class - St.Ack 2010929 
org.apache.hadoop.hbase.client.HConnectionManager.deleteAllConnections()
          kept for backward compatibility, but the behavior is broken. HBASE-8983 
org.apache.hadoop.hbase.client.HConnectionManager.deleteAllConnections(boolean)
org.apache.hadoop.hbase.client.HConnectionManager.deleteConnection(Configuration)
org.apache.hadoop.hbase.client.HConnectionManager.deleteStaleConnection(HConnection)
org.apache.hadoop.hbase.filter.FilterWrapper.filterRow(List)
org.apache.hadoop.hbase.filter.Filter.filterRow(List)
org.apache.hadoop.hbase.filter.FilterList.filterRow(List)
org.apache.hadoop.hbase.filter.FilterBase.filterRow(List)
org.apache.hadoop.hbase.thrift.ThriftServerRunner.HBaseHandler.get(ByteBuffer, ByteBuffer, ByteBuffer, Map)
org.apache.hadoop.hbase.client.Mutation.getACLStrategy()
          No effect 
org.apache.hadoop.hbase.client.Query.getACLStrategy()
          No effect 
org.apache.hadoop.hbase.client.HConnection.getAdmin(ServerName, boolean)
          You can pass the master flag, but nothing special is done.
org.apache.hadoop.hbase.security.visibility.DefaultVisibilityLabelServiceImpl.getAuths(byte[], boolean)
org.apache.hadoop.hbase.security.visibility.VisibilityLabelService.getAuths(byte[], boolean)
          Use  
org.apache.hadoop.hbase.KeyValue.getBuffer()
          Since 0.98.0. Use Cell Interface instead. Do not presume single backing buffer. 
org.apache.hadoop.hbase.zookeeper.ZKUtil.getChildDataAndWatchForNewChildren(ZooKeeperWatcher, String)
          Unused 
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKeyOrBuilder.getClusterId()
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.getClusterId()
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder.getClusterId()
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder.getClusterIdBuilder()
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKeyOrBuilder.getClusterIdOrBuilder()
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.getClusterIdOrBuilder()
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder.getClusterIdOrBuilder()
org.apache.hadoop.hbase.client.Result.getColumn(byte[], byte[])
          Use Result.getColumnCells(byte[], byte[]) instead. 
org.apache.hadoop.hbase.client.Result.getColumnLatest(byte[], byte[])
          Use Result.getColumnLatestCell(byte[], byte[]) instead. 
org.apache.hadoop.hbase.client.Result.getColumnLatest(byte[], int, int, byte[], int, int)
          Use Result.getColumnLatestCell(byte[], int, int, byte[], int, int) instead. 
org.apache.hadoop.hbase.client.HTable.getConnection()
          This method will be changed from public to package protected. 
org.apache.hadoop.hbase.client.HConnectionManager.getConnection(Configuration)
org.apache.hadoop.hbase.client.HConnection.getCurrentNrHRS()
          This method will be changed from public to package protected.
org.apache.hadoop.hbase.HColumnDescriptor.getDataBlockEncodingOnDisk()
org.apache.hadoop.hbase.Cell.getFamily()
          as of 0.96, use CellUtil.cloneFamily(Cell) 
org.apache.hadoop.hbase.KeyValue.getFamily()
org.apache.hadoop.hbase.client.Mutation.getFamilyMap()
          use Mutation.getFamilyCellMap() instead. 
org.apache.hadoop.hbase.client.HConnection.getHTableDescriptor(byte[])
org.apache.hadoop.hbase.client.HConnection.getHTableDescriptors(List)
org.apache.hadoop.hbase.client.HConnection.getKeepAliveMasterService()
          Since 0.96.0 
org.apache.hadoop.hbase.regionserver.wal.WALEdit.getKeyValues()
          Use WALEdit.getCells() instead 
org.apache.hadoop.hbase.filter.FilterWrapper.getNextKeyHint(KeyValue)
org.apache.hadoop.hbase.filter.Filter.getNextKeyHint(KeyValue)
org.apache.hadoop.hbase.filter.FilterList.getNextKeyHint(KeyValue)
org.apache.hadoop.hbase.filter.FilterBase.getNextKeyHint(KeyValue)
org.apache.hadoop.hbase.HTableDescriptor.getOwnerString()
org.apache.hadoop.hbase.Cell.getQualifier()
          as of 0.96, use CellUtil.cloneQualifier(Cell) 
org.apache.hadoop.hbase.KeyValue.getQualifier()
org.apache.hadoop.hbase.regionserver.HRegion.getRegionDir(Path, HRegionInfo)
org.apache.hadoop.hbase.regionserver.HRegion.getRegionDir(Path, String)
org.apache.hadoop.hbase.client.HConnection.getRegionLocation(byte[], byte[], boolean)
org.apache.hadoop.hbase.Cell.getRow()
          as of 0.96, use CellUtil.getRowByte(Cell, int) 
org.apache.hadoop.hbase.KeyValue.getRow()
org.apache.hadoop.hbase.client.HTableInterface.getRowOrBefore(byte[], byte[])
          As of version 0.92 this method is deprecated without replacement. getRowOrBefore is used internally to find entries in hbase:meta and makes various assumptions about the table (which are true for hbase:meta but not in general) to be efficient. 
org.apache.hadoop.hbase.regionserver.StoreFile.Reader.getScanner(boolean, boolean)
org.apache.hadoop.hbase.regionserver.StoreFile.Reader.getScanner(boolean, boolean, boolean)
org.apache.hadoop.hbase.client.HTable.getScannerCaching()
          Use Scan.setCaching(int) and Scan.getCaching() 
org.apache.hadoop.hbase.ClusterStatus.getServerInfo()
          Use ClusterStatus.getServers() 
org.apache.hadoop.hbase.io.ImmutableBytesWritable.getSize()
          use ImmutableBytesWritable.getLength() instead 
org.apache.hadoop.hbase.regionserver.HStore.getStoreHomedir(Path, HRegionInfo, byte[])
org.apache.hadoop.hbase.regionserver.HStore.getStoreHomedir(Path, String, byte[])
org.apache.hadoop.hbase.HTableDescriptor.getTableDir(Path, byte[])
org.apache.hadoop.hbase.HRegionInfo.getTableName()
          Since 0.96.0; use HRegionInfo.getTable()
org.apache.hadoop.hbase.client.ClientScanner.getTableName()
          Since 0.96.0; use ClientScanner.getTable() 
org.apache.hadoop.hbase.HRegionInfo.getTableName(byte[])
          Since 0.96.0; use HRegionInfo.getTable(byte[])
org.apache.hadoop.hbase.client.HConnection.getTableNames()
org.apache.hadoop.hbase.client.HBaseAdmin.getTableNames()
org.apache.hadoop.hbase.client.HBaseAdmin.getTableNames(Pattern)
org.apache.hadoop.hbase.client.HBaseAdmin.getTableNames(String)
org.apache.hadoop.hbase.Cell.getTagsLength()
          use Cell.getTagsLengthUnsigned(), which can handle tags of length up to 65535.
org.apache.hadoop.hbase.KeyValue.getTagsLength()
org.apache.hadoop.hbase.NoTagsKeyValue.getTagsLength()
org.apache.hadoop.hbase.codec.prefixtree.decode.PrefixTreeCell.getTagsLength()
org.apache.hadoop.hbase.Cell.getTagsLengthUnsigned()
          From the next major version this will be renamed to getTagsLength(), returning int.
org.apache.hadoop.hbase.KeyValue.getTagsLengthUnsigned()
org.apache.hadoop.hbase.NoTagsKeyValue.getTagsLengthUnsigned()
org.apache.hadoop.hbase.codec.prefixtree.decode.PrefixTreeCell.getTagsLengthUnsigned()
org.apache.hadoop.hbase.KeyValue.getType()
org.apache.hadoop.hbase.Cell.getValue()
          as of 0.96, use CellUtil.cloneValue(Cell) 
org.apache.hadoop.hbase.KeyValue.getValue()
org.apache.hadoop.hbase.coprocessor.ColumnInterpreter.getValue(byte[], byte[], KeyValue)
org.apache.hadoop.hbase.thrift.ThriftServerRunner.HBaseHandler.getVer(ByteBuffer, ByteBuffer, ByteBuffer, int, Map)
org.apache.hadoop.hbase.HRegionInfo.getVersion()
          HRI is no longer a VersionedWritable 
org.apache.hadoop.hbase.thrift.ThriftServerRunner.HBaseHandler.getVerTs(ByteBuffer, ByteBuffer, ByteBuffer, long, int, Map)
org.apache.hadoop.hbase.client.HTable.getWriteBuffer()
          since 0.96. This is an internal buffer that should be neither read nor written.
org.apache.hadoop.hbase.client.Mutation.getWriteToWAL()
          Use Mutation.getDurability() instead. 
org.apache.hadoop.hbase.security.access.AccessControlClient.grant(Configuration, TableName, String, byte[], byte[], AccessControlProtos.Permission.Action...)
          Use AccessControlClient.grant(Configuration, TableName, String, byte[], byte[], Permission.Action...) instead. 
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKeyOrBuilder.hasClusterId()
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.hasClusterId()
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder.hasClusterId()
org.apache.hadoop.hbase.security.visibility.DefaultVisibilityLabelServiceImpl.havingSystemAuth(byte[])
org.apache.hadoop.hbase.security.visibility.VisibilityLabelService.havingSystemAuth(byte[])
          Use  
org.apache.hadoop.hbase.KeyValue.heapSizeWithoutTags()
org.apache.hadoop.hbase.client.HTable.incrementColumnValue(byte[], byte[], byte[], long, boolean)
          Use HTable.incrementColumnValue(byte[], byte[], byte[], long, Durability) 
org.apache.hadoop.hbase.client.HTableInterface.incrementColumnValue(byte[], byte[], byte[], long, boolean)
          Use HTableInterface.incrementColumnValue(byte[], byte[], byte[], long, Durability) 
org.apache.hadoop.hbase.regionserver.HRegion.initialize()
          use HRegion.createHRegion() or HRegion.openHRegion() 
org.apache.hadoop.hbase.HTableDescriptor.isDeferredLogFlush()
org.apache.hadoop.hbase.KeyValue.isDelete()
org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate.isLogDeletable(FileStatus)
org.apache.hadoop.hbase.security.Superusers.isSuperUser(String)
          Use Superusers.isSuperUser(User) 
org.apache.hadoop.hbase.client.HConnection.isTableAvailable(byte[])
org.apache.hadoop.hbase.client.HConnection.isTableAvailable(byte[], byte[][])
org.apache.hadoop.hbase.client.HConnection.isTableDisabled(byte[])
org.apache.hadoop.hbase.client.HConnection.isTableEnabled(byte[])
org.apache.hadoop.hbase.client.HTable.isTableEnabled(byte[])
          use HBaseAdmin.isTableEnabled(byte[]) 
org.apache.hadoop.hbase.client.HTable.isTableEnabled(Configuration, byte[])
          use HBaseAdmin.isTableEnabled(byte[]) 
org.apache.hadoop.hbase.client.HTable.isTableEnabled(Configuration, String)
          use HBaseAdmin.isTableEnabled(byte[]) 
org.apache.hadoop.hbase.client.HTable.isTableEnabled(Configuration, TableName)
          use HBaseAdmin.isTableEnabled(org.apache.hadoop.hbase.TableName tableName) 
org.apache.hadoop.hbase.client.HTable.isTableEnabled(String)
          use HBaseAdmin.isTableEnabled(byte[]) 
org.apache.hadoop.hbase.client.HTable.isTableEnabled(TableName)
          use HBaseAdmin.isTableEnabled(byte[]) 
org.apache.hadoop.hbase.client.Result.list()
          as of 0.96, use Result.listCells() 
org.apache.hadoop.hbase.client.replication.ReplicationAdmin.listPeers()
          use ReplicationAdmin.listPeerConfigs() 
org.apache.hadoop.hbase.client.HConnection.locateRegion(byte[], byte[])
org.apache.hadoop.hbase.client.HConnection.locateRegions(byte[])
org.apache.hadoop.hbase.client.HConnection.locateRegions(byte[], boolean, boolean)
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder.mergeClusterId(HBaseProtos.UUID)
org.apache.hadoop.hbase.security.token.TokenUtil.obtainAndCacheToken(Configuration, UserGroupInformation)
          Replaced by TokenUtil.obtainAndCacheToken(HConnection,User) 
org.apache.hadoop.hbase.security.User.obtainAuthTokenForJob(Configuration, Job)
          Use TokenUtil.obtainAuthTokenForJob(HConnection,User,Job) instead. 
org.apache.hadoop.hbase.security.User.obtainAuthTokenForJob(JobConf)
          Use TokenUtil.obtainAuthTokenForJob(HConnection,JobConf,User) instead. 
org.apache.hadoop.hbase.security.token.TokenUtil.obtainToken(Configuration)
          Replaced by TokenUtil.obtainToken(HConnection) 
org.apache.hadoop.hbase.security.token.TokenUtil.obtainTokenForJob(Configuration, UserGroupInformation, Job)
          Replaced by TokenUtil.obtainTokenForJob(HConnection,User,Job) 
org.apache.hadoop.hbase.security.token.TokenUtil.obtainTokenForJob(JobConf, UserGroupInformation)
          Replaced by TokenUtil.obtainTokenForJob(HConnection,JobConf,User) 
org.apache.hadoop.hbase.KeyValue.oswrite(KeyValue, OutputStream)
org.apache.hadoop.hbase.zookeeper.ZKConfig.parseZooCfg(Configuration, InputStream)
          in 0.96 onwards; HBase will no longer rely on zoo.cfg availability.
org.apache.hadoop.hbase.coprocessor.RegionObserver.postCompact(ObserverContext, Store, StoreFile)
          Use RegionObserver.postCompact(ObserverContext, Store, StoreFile, CompactionRequest) instead 
org.apache.hadoop.hbase.coprocessor.RegionObserver.postCompactSelection(ObserverContext, Store, ImmutableList)
          use RegionObserver.postCompactSelection(ObserverContext, Store, ImmutableList, CompactionRequest) instead. 
org.apache.hadoop.hbase.coprocessor.RegionObserver.postFlush(ObserverContext)
          use RegionObserver.preFlush(ObserverContext, Store, InternalScanner) instead. 
org.apache.hadoop.hbase.coprocessor.BaseRegionObserver.postGet(ObserverContext, Get, List)
org.apache.hadoop.hbase.coprocessor.RegionObserver.postGet(ObserverContext, Get, List)
org.apache.hadoop.hbase.coprocessor.RegionObserver.postIncrementColumnValue(ObserverContext, byte[], byte[], byte[], long, boolean, long)
          This hook is no longer called by the RegionServer 
org.apache.hadoop.hbase.regionserver.RegionServerServices.postOpenDeployTasks(HRegion, CatalogTracker)
          use RegionServerServices.postOpenDeployTasks(PostOpenDeployContext)
org.apache.hadoop.hbase.coprocessor.RegionObserver.postSplit(ObserverContext, HRegion, HRegion)
          Use postCompleteSplit() instead 
org.apache.hadoop.hbase.coprocessor.RegionObserver.preCompact(ObserverContext, Store, InternalScanner, ScanType)
          use RegionObserver.preCompact(ObserverContext, Store, InternalScanner, ScanType, CompactionRequest) instead 
org.apache.hadoop.hbase.coprocessor.RegionObserver.preCompactScannerOpen(ObserverContext, Store, List, ScanType, long, InternalScanner)
          Use RegionObserver.preCompactScannerOpen(ObserverContext, Store, List, ScanType, long, InternalScanner, CompactionRequest) instead. 
org.apache.hadoop.hbase.coprocessor.RegionObserver.preCompactSelection(ObserverContext, Store, List)
          Use RegionObserver.preCompactSelection(ObserverContext, Store, List, CompactionRequest) instead 
org.apache.hadoop.hbase.coprocessor.RegionObserver.preFlush(ObserverContext)
          use RegionObserver.preFlush(ObserverContext, Store, InternalScanner) instead 
org.apache.hadoop.hbase.coprocessor.BaseRegionObserver.preGet(ObserverContext, Get, List)
org.apache.hadoop.hbase.coprocessor.RegionObserver.preGet(ObserverContext, Get, List)
org.apache.hadoop.hbase.coprocessor.RegionObserver.preIncrementColumnValue(ObserverContext, byte[], byte[], byte[], long, boolean)
          This hook is no longer called by the RegionServer 
org.apache.hadoop.hbase.coprocessor.RegionObserver.preSplit(ObserverContext)
          Use preSplit(ObserverContext, byte[] splitRow) instead
org.apache.hadoop.hbase.client.HConnection.processBatch(List, byte[], ExecutorService, Object[])
org.apache.hadoop.hbase.client.HConnectionManager.HConnectionImplementation.processBatch(List, byte[], ExecutorService, Object[])
org.apache.hadoop.hbase.client.HConnection.processBatch(List, TableName, ExecutorService, Object[])
          since 0.96 - Use HTableInterface.batch(java.util.List, java.lang.Object[]) instead 
org.apache.hadoop.hbase.client.HConnectionManager.HConnectionImplementation.processBatch(List, TableName, ExecutorService, Object[])
org.apache.hadoop.hbase.client.HConnection.processBatchCallback(List, byte[], ExecutorService, Object[], Batch.Callback)
org.apache.hadoop.hbase.client.HConnectionManager.HConnectionImplementation.processBatchCallback(List, byte[], ExecutorService, Object[], Batch.Callback)
org.apache.hadoop.hbase.client.HConnection.processBatchCallback(List, TableName, ExecutorService, Object[], Batch.Callback)
          since 0.96 - Use HTableInterface.batchCallback(java.util.List, java.lang.Object[], org.apache.hadoop.hbase.client.coprocessor.Batch.Callback) instead 
org.apache.hadoop.hbase.client.HConnectionManager.HConnectionImplementation.processBatchCallback(List, TableName, ExecutorService, Object[], Batch.Callback)
          since 0.96 - Use HTable.processBatchCallback(java.util.List, java.lang.Object[], org.apache.hadoop.hbase.client.coprocessor.Batch.Callback) instead 
org.apache.hadoop.hbase.client.HTableMultiplexer.put(byte[], List)
org.apache.hadoop.hbase.client.HTableMultiplexer.put(byte[], Put)
org.apache.hadoop.hbase.client.HTableMultiplexer.put(byte[], Put, int)
org.apache.hadoop.hbase.client.HTablePool.putTable(HTableInterface)
org.apache.hadoop.hbase.client.Result.raw()
          as of 0.96, use Result.rawCells() 
org.apache.hadoop.hbase.HRegionInfo.readFields(DataInput)
          Use protobuf deserialization instead. 
org.apache.hadoop.hbase.HColumnDescriptor.readFields(DataInput)
          Writables are going away. Use pb HColumnDescriptor.parseFrom(byte[]) instead. 
org.apache.hadoop.hbase.HTableDescriptor.readFields(DataInput)
          Writables are going away. Use pb HTableDescriptor.parseFrom(byte[]) instead. 
org.apache.hadoop.hbase.io.Reference.readFields(DataInput)
          Writables are going away. Use the pb serialization methods instead. Remove in a release after 0.96 goes out. This is here only to migrate old Reference files written with Writables before 0.96. 
org.apache.hadoop.hbase.util.Bytes.readVLong(byte[], int)
          Use Bytes.readAsVLong() instead.
org.apache.hadoop.hbase.client.HConnection.relocateRegion(byte[], byte[])
org.apache.hadoop.hbase.regionserver.RegionServerServices.reportRegionStateTransition(RegionServerStatusProtos.RegionStateTransition.TransitionCode, HRegionInfo...)
org.apache.hadoop.hbase.regionserver.RegionServerServices.reportRegionStateTransition(RegionServerStatusProtos.RegionStateTransition.TransitionCode, long, HRegionInfo...)
org.apache.hadoop.hbase.regionserver.Store.requestCompaction(int, CompactionRequest)
          see requestCompaction(int, CompactionRequest, User) 
org.apache.hadoop.hbase.security.access.AccessControlClient.revoke(Configuration, String, TableName, byte[], byte[], AccessControlProtos.Permission.Action...)
          Use AccessControlClient.revoke(Configuration, TableName, String, byte[], byte[], Permission.Action...) instead 
org.apache.hadoop.hbase.client.Mutation.setACLStrategy(boolean)
          No effect 
org.apache.hadoop.hbase.client.Query.setACLStrategy(boolean)
          No effect 
org.apache.hadoop.hbase.client.HTable.setAutoFlush(boolean)
org.apache.hadoop.hbase.client.HTableInterface.setAutoFlush(boolean)
          in 0.96. When called with setAutoFlush(false), this function also sets clearBufferOnFail to true, which is unexpected but kept for historical reasons. Replace it with setAutoFlush(false, false) if this is exactly what you want, or with HTableInterface.setAutoFlushTo(boolean) for all other cases.
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder.setClusterId(HBaseProtos.UUID.Builder)
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder.setClusterId(HBaseProtos.UUID)
org.apache.hadoop.hbase.HTableDescriptor.setDeferredLogFlush(boolean)
org.apache.hadoop.hbase.HColumnDescriptor.setEncodeOnDisk(boolean)
org.apache.hadoop.hbase.client.Mutation.setFamilyMap(NavigableMap<byte[], List<KeyValue>>)
          use Mutation.setFamilyCellMap(NavigableMap) instead. 
org.apache.hadoop.hbase.HColumnDescriptor.setKeepDeletedCells(boolean)
          use HColumnDescriptor.setKeepDeletedCells(KeepDeletedCells) 
org.apache.hadoop.hbase.HTableDescriptor.setName(byte[])
org.apache.hadoop.hbase.HTableDescriptor.setName(TableName)
org.apache.hadoop.hbase.HTableDescriptor.setOwner(User)
org.apache.hadoop.hbase.HTableDescriptor.setOwnerString(String)
org.apache.hadoop.hbase.client.replication.ReplicationAdmin.setPeerTableCFs(String, String)
          use ReplicationAdmin.setPeerTableCFs(String, Map) 
org.apache.hadoop.hbase.client.HTable.setScannerCaching(int)
          Use Scan.setCaching(int) 
org.apache.hadoop.hbase.client.Mutation.setWriteToWAL(boolean)
          Use Mutation.setDurability(Durability) instead. 
org.apache.hadoop.hbase.regionserver.RegionSplitPolicy.skipStoreFileRangeCheck()
          Use RegionSplitPolicy.skipStoreFileRangeCheck(String) instead
org.apache.hadoop.hbase.regionserver.RegionMergeTransaction.stepsAfterPONR(Server, RegionServerServices, HRegion)
org.apache.hadoop.hbase.regionserver.SplitTransaction.stepsAfterPONR(Server, RegionServerServices, PairOfSameType)
org.apache.hadoop.hbase.filter.FilterWrapper.transform(KeyValue)
org.apache.hadoop.hbase.filter.Filter.transform(KeyValue)
org.apache.hadoop.hbase.filter.FilterList.transform(KeyValue)
org.apache.hadoop.hbase.filter.FilterBase.transform(KeyValue)
org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.tryAtomicRegionLoad(HConnection, byte[], byte[], Collection)
          Use LoadIncrementalHFiles.tryAtomicRegionLoad(HConnection, TableName, byte[], Collection) 
org.apache.hadoop.hbase.client.HConnection.updateCachedLocations(byte[], byte[], Object, HRegionLocation)
org.apache.hadoop.hbase.zookeeper.ZKUtil.updateExistingNodeData(ZooKeeperWatcher, String, byte[], int)
          Unused 
org.apache.hadoop.hbase.catalog.CatalogTracker.waitForMetaServerConnection(long)
          Use CatalogTracker.getMetaServerConnection(long)
org.apache.hadoop.hbase.HRegionInfo.write(DataOutput)
          Use protobuf serialization instead. See HRegionInfo.toByteArray() and HRegionInfo.toDelimitedByteArray() 
org.apache.hadoop.hbase.HColumnDescriptor.write(DataOutput)
          Writables are going away. Use HColumnDescriptor.toByteArray() instead. 
org.apache.hadoop.hbase.HTableDescriptor.write(DataOutput)
          Writables are going away. Use HTableDescriptor.toByteArray() instead.
org.apache.hadoop.hbase.regionserver.wal.HLogKey.write(DataOutput)
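
Several of the method entries above share two migration patterns: pass a pre-allocated results array to batch so partially executed results survive an exception, and read a Result through the Cell interface (listCells, CellUtil) rather than the deprecated raw()/list()/getFamily()-style accessors. A minimal combined sketch, assuming the 0.98-era client (the table and row names are illustrative):

  import java.util.ArrayList;
  import java.util.List;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.Cell;
  import org.apache.hadoop.hbase.CellUtil;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Get;
  import org.apache.hadoop.hbase.client.HConnection;
  import org.apache.hadoop.hbase.client.HConnectionManager;
  import org.apache.hadoop.hbase.client.HTableInterface;
  import org.apache.hadoop.hbase.client.Result;
  import org.apache.hadoop.hbase.client.Row;
  import org.apache.hadoop.hbase.util.Bytes;

  public class BatchMigration {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      HConnection connection = HConnectionManager.createConnection(conf);
      HTableInterface table = connection.getTable(TableName.valueOf("t1"));
      try {
        List<Row> actions = new ArrayList<Row>();
        actions.add(new Get(Bytes.toBytes("row1")));
        actions.add(new Get(Bytes.toBytes("row2")));

        // Before (deprecated): Object[] results = table.batch(actions);
        // After: the caller owns the array, so partial results survive failures.
        Object[] results = new Object[actions.size()];
        try {
          table.batch(actions, results);
        } finally {
          for (Object r : results) {
            if (!(r instanceof Result)) {
              continue; // null (not yet run) or a Throwable for a failed action
            }
            // Before (deprecated): KeyValue[] kvs = ((Result) r).raw();
            // After: iterate Cells and copy what you need via CellUtil.
            List<Cell> cells = ((Result) r).listCells();
            if (cells == null) {
              continue; // empty Result
            }
            for (Cell cell : cells) {
              System.out.println(Bytes.toString(CellUtil.cloneRow(cell)) + ":"
                  + Bytes.toString(CellUtil.cloneValue(cell)));
            }
          }
        }
      } finally {
        table.close();
        connection.close();
      }
    }
  }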
           
 

Deprecated Constructors
org.apache.hadoop.hbase.client.ClientScanner(Configuration, Scan, byte[])
          Use ClientScanner.ClientScanner(Configuration, Scan, TableName) 
org.apache.hadoop.hbase.client.ClientScanner(Configuration, Scan, byte[], HConnection)
          Use ClientScanner.ClientScanner(Configuration, Scan, TableName, HConnection) 
org.apache.hadoop.hbase.client.ClientScanner(Configuration, Scan, TableName)
org.apache.hadoop.hbase.client.ClientScanner(Configuration, Scan, TableName, HConnection, RpcRetryingCallerFactory)
          Use ClientScanner.ClientScanner(Configuration, Scan, TableName, HConnection, RpcRetryingCallerFactory, RpcControllerFactory) instead 
org.apache.hadoop.hbase.ClusterStatus()
          Used by Writables, and Writables are going away.
org.apache.hadoop.hbase.client.CoprocessorHConnection(HConnection, HRegionServer)
          delegate is not used 
org.apache.hadoop.hbase.HBaseConfiguration()
org.apache.hadoop.hbase.HBaseConfiguration(Configuration)
org.apache.hadoop.hbase.HColumnDescriptor()
          Used by Writables, and Writables are going away.
org.apache.hadoop.hbase.HColumnDescriptor(byte[], int, int, KeepDeletedCells, String, boolean, String, boolean, boolean, int, int, String, int)
          use HColumnDescriptor.HColumnDescriptor(String) and setters; a migration sketch follows this list
org.apache.hadoop.hbase.HColumnDescriptor(byte[], int, String, boolean, boolean, int, int, String, int)
          use HColumnDescriptor.HColumnDescriptor(String) and setters 
org.apache.hadoop.hbase.HColumnDescriptor(byte[], int, String, boolean, boolean, int, String)
          use HColumnDescriptor.HColumnDescriptor(String) and setters 
org.apache.hadoop.hbase.regionserver.HRegion(Path, HLog, FileSystem, Configuration, HRegionInfo, HTableDescriptor, RegionServerServices)
org.apache.hadoop.hbase.HRegionInfo()
          Used by Writables, and Writables are going away.
org.apache.hadoop.hbase.HTableDescriptor()
          Used by Writables, and Writables are going away.
org.apache.hadoop.hbase.HTableDescriptor(byte[])
org.apache.hadoop.hbase.HTableDescriptor(String)
org.apache.hadoop.hbase.io.Reference()
org.apache.hadoop.hbase.client.Result(KeyValue[])
          Use Result.create(List) instead. 
org.apache.hadoop.hbase.client.Result(List)
          Use Result.create(List) instead. 
org.apache.hadoop.hbase.client.ReversedScannerCallable(HConnection, TableName, Scan, ScanMetrics, byte[])
org.apache.hadoop.hbase.client.ScannerCallable(HConnection, byte[], Scan, ScanMetrics)
          Use ScannerCallable.ScannerCallable(HConnection, TableName, Scan, ScanMetrics, PayloadCarryingRpcController) 
org.apache.hadoop.hbase.mapreduce.TableSplit(byte[], byte[], byte[], String)
          Since 0.96.0; use TableSplit.TableSplit(TableName, byte[], byte[], String) 
org.apache.hadoop.hbase.mapreduce.TableSplit(byte[], Scan, byte[], byte[], String)
          Since 0.96.0; use TableSplit.TableSplit(TableName, byte[], byte[], String) 
org.apache.hadoop.hbase.Tag(byte[], int, short)
          Use Tag.Tag(byte[], int, int) 
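
For the many-argument HColumnDescriptor constructors above, the named replacement is the single-argument constructor plus setters. A minimal sketch, assuming the 0.98-era API (the family name "cf" and the chosen settings are illustrative):

  import org.apache.hadoop.hbase.HColumnDescriptor;
  import org.apache.hadoop.hbase.HConstants;
  import org.apache.hadoop.hbase.io.compress.Compression;

  public class FamilyDescriptorMigration {
    public static void main(String[] args) {
      // Before (deprecated): one constructor taking a dozen positional arguments.
      // After: name the family, then set only the options you care about.
      HColumnDescriptor family = new HColumnDescriptor("cf");
      family.setMaxVersions(3);
      family.setCompressionType(Compression.Algorithm.GZ);
      family.setBlocksize(64 * 1024);
      family.setInMemory(false);
      family.setBlockCacheEnabled(true);
      family.setTimeToLive(HConstants.FOREVER);
      System.out.println(family);
    }
  }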
 



Copyright © 2007–2015 The Apache Software Foundation. All rights reserved.