org.apache.accumulo.server.util
Class MetadataTable
java.lang.Object
  org.apache.accumulo.core.util.MetadataTable
    org.apache.accumulo.server.util.MetadataTable
public class MetadataTable
extends org.apache.accumulo.core.util.MetadataTable
Provides a reference to the metadata table for updates by tablet servers.
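For orientation, a minimal sketch of a typical write path is shown below, combining createDeleteMutation(String, String) and update(TCredentials, Mutation) from this class. The class name, table id, file path, and the import location of TCredentials are illustrative assumptions only, not values prescribed by this API.

  import org.apache.accumulo.core.data.Mutation;
  import org.apache.accumulo.core.security.thrift.TCredentials; // package location assumed for this 1.5-era snapshot
  import org.apache.accumulo.server.util.MetadataTable;

  public class MetadataDeleteSketch {
    // Create a delete entry for a data file that is no longer referenced and
    // write it to the metadata table. The table id and the path (relative to
    // the table directory) are placeholders for illustration only.
    static void markFileForDeletion(TCredentials credentials) {
      Mutation m = MetadataTable.createDeleteMutation("2a", "/t-0001/F0000abc.rf");
      MetadataTable.update(credentials, m);
    }
  }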
Method Summary

static void  addBulkLoadInProgressFlag(String path)
static void  addDeleteEntries(KeyExtent extent, Set<String> datafilesToDelete, TCredentials credentials)
static void  addDeleteEntry(String tableId, String path)
static void  addLogEntry(TCredentials credentials, MetadataTable.LogEntry entry, ZooLock zooLock)
static void  addNewTablet(KeyExtent extent, String path, TServerInstance location, Map<String,MetadataTable.DataFileValue> datafileSizes, Map<String,Long> bulkLoadedFiles, TCredentials credentials, String time, long lastFlushID, long lastCompactID, ZooLock zooLock)
static void  addTablet(KeyExtent extent, String path, TCredentials credentials, char timeType, ZooLock lock)
static void  chopped(KeyExtent extent, ZooLock zooLock)
static void  cloneTable(Instance instance, String srcTableId, String tableId)
static Mutation  createDeleteMutation(String tableId, String pathToRemove)
static void  deleteTable(String tableId, boolean insertDeletes, TCredentials credentials, ZooLock lock)
static MetadataTable.LogEntry  entryFromKeyValue(Key key, Value value)
static void  finishSplit(KeyExtent extent, Map<String,MetadataTable.DataFileValue> datafileSizes, List<String> highDatafilesToRemove, TCredentials credentials, ZooLock zooLock)
static void  finishSplit(org.apache.hadoop.io.Text metadataEntry, Map<String,MetadataTable.DataFileValue> datafileSizes, List<String> highDatafilesToRemove, TCredentials credentials, ZooLock zooLock)
static KeyExtent  fixSplit(org.apache.hadoop.io.Text metadataEntry, SortedMap<ColumnFQ,Value> columns, TServerInstance tserver, TCredentials credentials, ZooLock lock)
static List<String>  getBulkFilesLoaded(Connector conn, KeyExtent extent, long tid)
static Map<String,Long>  getBulkFilesLoaded(TCredentials credentials, KeyExtent extent)
static Map<String,Long>  getBulkFilesLoaded(TCredentials credentials, org.apache.hadoop.io.Text metadataRow)
static SortedMap<String,MetadataTable.DataFileValue>  getDataFileSizes(KeyExtent extent, TCredentials credentials)
static Pair<List<MetadataTable.LogEntry>,SortedMap<String,MetadataTable.DataFileValue>>  getFileAndLogEntries(TCredentials credentials, KeyExtent extent)
static Iterator<MetadataTable.LogEntry>  getLogEntries(TCredentials creds)
static List<MetadataTable.LogEntry>  getLogEntries(TCredentials credentials, KeyExtent extent)
static SortedMap<KeyExtent,org.apache.hadoop.io.Text>  getMetadataDirectoryEntries(SortedMap<Key,Value> entries)
    Convenience method for reading entries from the metadata table.
static Writer  getMetadataTable(TCredentials credentials)
static void  moveMetaDeleteMarkers(Instance instance, TCredentials creds)
static void  putLockID(ZooLock zooLock, Mutation m)
static boolean  recordRootTabletLocation(String address)
static void  removeBulkLoadEntries(Connector conn, String tableId, long tid)
static void  removeBulkLoadInProgressFlag(String path)
static void  removeScanFiles(KeyExtent extent, Set<String> scanFiles, TCredentials credentials, ZooLock zooLock)
static void  removeUnusedWALEntries(KeyExtent extent, List<MetadataTable.LogEntry> logEntries, ZooLock zooLock)
static void  replaceDatafiles(KeyExtent extent, Set<String> datafilesToDelete, Set<String> scanFiles, String path, Long compactionId, MetadataTable.DataFileValue size, TCredentials credentials, String address, TServerInstance lastLocation, ZooLock zooLock)
static void  replaceDatafiles(KeyExtent extent, Set<String> datafilesToDelete, Set<String> scanFiles, String path, Long compactionId, MetadataTable.DataFileValue size, TCredentials credentials, String address, TServerInstance lastLocation, ZooLock zooLock, boolean insertDeleteFlags)
static void  rollBackSplit(org.apache.hadoop.io.Text metadataEntry, org.apache.hadoop.io.Text oldPrevEndRow, TCredentials credentials, ZooLock zooLock)
static void  splitDatafiles(org.apache.hadoop.io.Text table, org.apache.hadoop.io.Text midRow, double splitRatio, Map<String,FileUtil.FileInfo> firstAndLastRows, SortedMap<String,MetadataTable.DataFileValue> datafiles, SortedMap<String,MetadataTable.DataFileValue> lowDatafileSizes, SortedMap<String,MetadataTable.DataFileValue> highDatafileSizes, List<String> highDatafilesToRemove)
static void  splitTablet(KeyExtent extent, org.apache.hadoop.io.Text oldPrevEndRow, double splitRatio, TCredentials credentials, ZooLock zooLock)
static void  update(TCredentials credentials, Mutation m)
static void  update(TCredentials credentials, ZooLock zooLock, Mutation m)
static void  updateTabletCompactID(KeyExtent extent, long compactID, TCredentials credentials, ZooLock zooLock)
static void  updateTabletDataFile(KeyExtent extent, String path, String mergeFile, MetadataTable.DataFileValue dfv, String time, TCredentials credentials, Set<String> filesInUseByScans, String address, ZooLock zooLock, Set<String> unusedWalLogs, TServerInstance lastLocation, long flushId)
    New data file update function; adds one data file to a tablet's list. The path should be relative to the table directory.
static void  updateTabletDataFile(long tid, KeyExtent extent, Map<String,MetadataTable.DataFileValue> estSizes, String time, TCredentials credentials, ZooLock zooLock)
static void  updateTabletFlushID(KeyExtent extent, long flushID, TCredentials credentials, ZooLock zooLock)
static void  updateTabletPrevEndRow(KeyExtent extent, TCredentials credentials)

Methods inherited from class java.lang.Object
clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

Method Detail
getMetadataTable
public static Writer getMetadataTable(TCredentials credentials)
putLockID
public static void putLockID(ZooLock zooLock,
Mutation m)
update
public static void update(TCredentials credentials,
Mutation m)
update
public static void update(TCredentials credentials,
ZooLock zooLock,
Mutation m)
updateTabletDataFile
public static void updateTabletDataFile(KeyExtent extent,
String path,
String mergeFile,
MetadataTable.DataFileValue dfv,
String time,
TCredentials credentials,
Set<String> filesInUseByScans,
String address,
ZooLock zooLock,
Set<String> unusedWalLogs,
TServerInstance lastLocation,
long flushId)
New data file update function; adds one data file to a tablet's list. The path should be relative to the table directory.
Parameters:
time -
filesInUseByScans -
zooLock -
flushId -
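Below is a hypothetical sketch of one call to this method as it might occur after a minor compaction. Only the method signature comes from this page; everything else is an assumption for illustration, including the file path, the estimated sizes, the "M"-prefixed time string, the two-argument MetadataTable.DataFileValue constructor, and the import locations of TCredentials, TServerInstance, and ZooLock, which may differ between Accumulo versions.

  import java.util.Collections;

  import org.apache.accumulo.core.data.KeyExtent;
  import org.apache.accumulo.core.security.thrift.TCredentials;   // package location assumed for this snapshot
  import org.apache.accumulo.server.master.state.TServerInstance; // package location assumed for this snapshot
  import org.apache.accumulo.server.util.MetadataTable;
  import org.apache.accumulo.server.zookeeper.ZooLock;            // package location assumed for this snapshot

  public class DataFileUpdateSketch {
    // Register one newly written data file for a tablet. Every argument is a
    // placeholder supplied by the caller; real values come from tablet-server state.
    static void recordMinorCompaction(KeyExtent extent, TCredentials credentials, String tserverAddress,
        ZooLock zooLock, TServerInstance lastLocation, long flushId) {
      // Estimated size in bytes and number of entries (two-argument constructor assumed).
      MetadataTable.DataFileValue dfv = new MetadataTable.DataFileValue(1024L, 50L);
      MetadataTable.updateTabletDataFile(
          extent,
          "/t-0001/F0000abc.rf",            // new data file, relative to the table directory
          null,                             // mergeFile: no merge file in this sketch
          dfv,
          "M12345",                         // time entry; "M" prefix for millis time type (assumption)
          credentials,
          Collections.<String> emptySet(),  // filesInUseByScans: none in this sketch
          tserverAddress,                   // address of this tablet server
          zooLock,                          // lock held by this tablet server
          Collections.<String> emptySet(),  // unusedWalLogs: none to drop
          lastLocation,                     // last recorded location
          flushId);                         // flush id for this minor compaction
    }
  }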
updateTabletFlushID
public static void updateTabletFlushID(KeyExtent extent,
long flushID,
TCredentials credentials,
ZooLock zooLock)
updateTabletCompactID
public static void updateTabletCompactID(KeyExtent extent,
long compactID,
TCredentials credentials,
ZooLock zooLock)
updateTabletDataFile
public static void updateTabletDataFile(long tid,
KeyExtent extent,
Map<String,MetadataTable.DataFileValue> estSizes,
String time,
TCredentials credentials,
ZooLock zooLock)
addTablet
public static void addTablet(KeyExtent extent,
String path,
TCredentials credentials,
char timeType,
ZooLock lock)
updateTabletPrevEndRow
public static void updateTabletPrevEndRow(KeyExtent extent,
TCredentials credentials)
getMetadataDirectoryEntries
public static SortedMap<KeyExtent,org.apache.hadoop.io.Text> getMetadataDirectoryEntries(SortedMap<Key,Value> entries)
Convenience method for reading entries from the metadata table.
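As a small illustration, the sketch below passes a SortedMap<Key,Value> that is assumed to have been populated elsewhere (for example, by copying the results of a metadata table scan into a TreeMap) and prints each tablet's extent and directory entry. The class and method names are placeholders.

  import java.util.Map;
  import java.util.SortedMap;

  import org.apache.accumulo.core.data.Key;
  import org.apache.accumulo.core.data.KeyExtent;
  import org.apache.accumulo.core.data.Value;
  import org.apache.accumulo.server.util.MetadataTable;
  import org.apache.hadoop.io.Text;

  public class DirectoryEntriesSketch {
    // `entries` is assumed to hold rows already read from the metadata table;
    // this method only demonstrates extracting the extent -> directory mapping.
    static void printDirectoryEntries(SortedMap<Key,Value> entries) {
      SortedMap<KeyExtent,Text> dirs = MetadataTable.getMetadataDirectoryEntries(entries);
      for (Map.Entry<KeyExtent,Text> e : dirs.entrySet()) {
        System.out.println(e.getKey() + " -> " + e.getValue());
      }
    }
  }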
recordRootTabletLocation
public static boolean recordRootTabletLocation(String address)
getDataFileSizes
public static SortedMap<String,MetadataTable.DataFileValue> getDataFileSizes(KeyExtent extent,
TCredentials credentials)
addNewTablet
public static void addNewTablet(KeyExtent extent,
String path,
TServerInstance location,
Map<String,MetadataTable.DataFileValue> datafileSizes,
Map<String,Long> bulkLoadedFiles,
TCredentials credentials,
String time,
long lastFlushID,
long lastCompactID,
ZooLock zooLock)
rollBackSplit
public static void rollBackSplit(org.apache.hadoop.io.Text metadataEntry,
org.apache.hadoop.io.Text oldPrevEndRow,
TCredentials credentials,
ZooLock zooLock)
splitTablet
public static void splitTablet(KeyExtent extent,
org.apache.hadoop.io.Text oldPrevEndRow,
double splitRatio,
TCredentials credentials,
ZooLock zooLock)
finishSplit
public static void finishSplit(org.apache.hadoop.io.Text metadataEntry,
Map<String,MetadataTable.DataFileValue> datafileSizes,
List<String> highDatafilesToRemove,
TCredentials credentials,
ZooLock zooLock)
finishSplit
public static void finishSplit(KeyExtent extent,
Map<String,MetadataTable.DataFileValue> datafileSizes,
List<String> highDatafilesToRemove,
TCredentials credentials,
ZooLock zooLock)
replaceDatafiles
public static void replaceDatafiles(KeyExtent extent,
Set<String> datafilesToDelete,
Set<String> scanFiles,
String path,
Long compactionId,
MetadataTable.DataFileValue size,
TCredentials credentials,
String address,
TServerInstance lastLocation,
ZooLock zooLock)
replaceDatafiles
public static void replaceDatafiles(KeyExtent extent,
Set<String> datafilesToDelete,
Set<String> scanFiles,
String path,
Long compactionId,
MetadataTable.DataFileValue size,
TCredentials credentials,
String address,
TServerInstance lastLocation,
ZooLock zooLock,
boolean insertDeleteFlags)
addDeleteEntries
public static void addDeleteEntries(KeyExtent extent,
Set<String> datafilesToDelete,
TCredentials credentials)
addDeleteEntry
public static void addDeleteEntry(String tableId,
String path)
createDeleteMutation
public static Mutation createDeleteMutation(String tableId,
String pathToRemove)
removeScanFiles
public static void removeScanFiles(KeyExtent extent,
Set<String> scanFiles,
TCredentials credentials,
ZooLock zooLock)
splitDatafiles
public static void splitDatafiles(org.apache.hadoop.io.Text table,
org.apache.hadoop.io.Text midRow,
double splitRatio,
Map<String,FileUtil.FileInfo> firstAndLastRows,
SortedMap<String,MetadataTable.DataFileValue> datafiles,
SortedMap<String,MetadataTable.DataFileValue> lowDatafileSizes,
SortedMap<String,MetadataTable.DataFileValue> highDatafileSizes,
List<String> highDatafilesToRemove)
fixSplit
public static KeyExtent fixSplit(org.apache.hadoop.io.Text metadataEntry,
SortedMap<ColumnFQ,Value> columns,
TServerInstance tserver,
TCredentials credentials,
ZooLock lock)
throws AccumuloException
Throws:
AccumuloException
deleteTable
public static void deleteTable(String tableId,
boolean insertDeletes,
TCredentials credentials,
ZooLock lock)
throws AccumuloException
Throws:
AccumuloException
addLogEntry
public static void addLogEntry(TCredentials credentials,
MetadataTable.LogEntry entry,
ZooLock zooLock)
entryFromKeyValue
public static MetadataTable.LogEntry entryFromKeyValue(Key key,
Value value)
getFileAndLogEntries
public static Pair<List<MetadataTable.LogEntry>,SortedMap<String,MetadataTable.DataFileValue>> getFileAndLogEntries(TCredentials credentials,
KeyExtent extent)
throws org.apache.zookeeper.KeeperException,
InterruptedException,
IOException
Throws:
org.apache.zookeeper.KeeperException
InterruptedException
IOException
getLogEntries
public static List<MetadataTable.LogEntry> getLogEntries(TCredentials credentials,
KeyExtent extent)
throws IOException,
org.apache.zookeeper.KeeperException,
InterruptedException
Throws:
IOException
org.apache.zookeeper.KeeperException
InterruptedException
getLogEntries
public static Iterator<MetadataTable.LogEntry> getLogEntries(TCredentials creds)
throws IOException,
org.apache.zookeeper.KeeperException,
InterruptedException
Throws:
IOException
org.apache.zookeeper.KeeperException
InterruptedException
removeUnusedWALEntries
public static void removeUnusedWALEntries(KeyExtent extent,
List<MetadataTable.LogEntry> logEntries,
ZooLock zooLock)
cloneTable
public static void cloneTable(Instance instance,
String srcTableId,
String tableId)
throws Exception
Throws:
Exception
chopped
public static void chopped(KeyExtent extent,
ZooLock zooLock)
removeBulkLoadEntries
public static void removeBulkLoadEntries(Connector conn,
String tableId,
long tid)
throws Exception
Throws:
Exception
getBulkFilesLoaded
public static List<String> getBulkFilesLoaded(Connector conn,
KeyExtent extent,
long tid)
getBulkFilesLoaded
public static Map<String,Long> getBulkFilesLoaded(TCredentials credentials,
KeyExtent extent)
getBulkFilesLoaded
public static Map<String,Long> getBulkFilesLoaded(TCredentials credentials,
org.apache.hadoop.io.Text metadataRow)
addBulkLoadInProgressFlag
public static void addBulkLoadInProgressFlag(String path)
removeBulkLoadInProgressFlag
public static void removeBulkLoadInProgressFlag(String path)
moveMetaDeleteMarkers
public static void moveMetaDeleteMarkers(Instance instance,
TCredentials creds)
Copyright © 2013 Apache Accumulo Project. All Rights Reserved.