org.apache.accumulo.server.util
public class MetadataTable extends org.apache.accumulo.core.util.MetadataTable
Modifier and Type | Class and Description |
---|---|
static class |
MetadataTable.LogEntry |
static class |
MetadataTable.DataFileValue |
Modifier and Type | Method and Description |
---|---|
static void |
addBulkLoadInProgressFlag(String path) |
static void |
addDeleteEntries(KeyExtent extent,
Set<String> datafilesToDelete,
TCredentials credentials) |
static void |
addDeleteEntry(String tableId,
String path) |
static void |
addLogEntry(TCredentials credentials,
MetadataTable.LogEntry entry,
ZooLock zooLock) |
static void |
addNewTablet(KeyExtent extent,
String path,
TServerInstance location,
Map<String,MetadataTable.DataFileValue> datafileSizes,
Map<String,Long> bulkLoadedFiles,
TCredentials credentials,
String time,
long lastFlushID,
long lastCompactID,
ZooLock zooLock) |
static void |
addTablet(KeyExtent extent,
String path,
TCredentials credentials,
char timeType,
ZooLock lock) |
static void |
chopped(KeyExtent extent,
ZooLock zooLock) |
static void |
cloneTable(Instance instance,
String srcTableId,
String tableId) |
static Mutation |
createDeleteMutation(String tableId,
String pathToRemove) |
static void |
deleteTable(String tableId,
boolean insertDeletes,
TCredentials credentials,
ZooLock lock) |
static MetadataTable.LogEntry |
entryFromKeyValue(Key key,
Value value) |
static void |
finishSplit(KeyExtent extent,
Map<String,MetadataTable.DataFileValue> datafileSizes,
List<String> highDatafilesToRemove,
TCredentials credentials,
ZooLock zooLock) |
static void |
finishSplit(org.apache.hadoop.io.Text metadataEntry,
Map<String,MetadataTable.DataFileValue> datafileSizes,
List<String> highDatafilesToRemove,
TCredentials credentials,
ZooLock zooLock) |
static KeyExtent |
fixSplit(org.apache.hadoop.io.Text metadataEntry,
SortedMap<ColumnFQ,Value> columns,
TServerInstance tserver,
TCredentials credentials,
ZooLock lock) |
static List<String> |
getBulkFilesLoaded(Connector conn,
KeyExtent extent,
long tid) |
static Map<String,Long> |
getBulkFilesLoaded(TCredentials credentials,
KeyExtent extent) |
static Map<String,Long> |
getBulkFilesLoaded(TCredentials credentials,
org.apache.hadoop.io.Text metadataRow) |
static SortedMap<String,MetadataTable.DataFileValue> |
getDataFileSizes(KeyExtent extent,
TCredentials credentials) |
static Pair<List<MetadataTable.LogEntry>,SortedMap<String,MetadataTable.DataFileValue>> |
getFileAndLogEntries(TCredentials credentials,
KeyExtent extent) |
static Iterator<MetadataTable.LogEntry> |
getLogEntries(TCredentials creds) |
static List<MetadataTable.LogEntry> |
getLogEntries(TCredentials credentials,
KeyExtent extent) |
static SortedMap<KeyExtent,org.apache.hadoop.io.Text> |
getMetadataDirectoryEntries(SortedMap<Key,Value> entries)
convenience method for reading entries from the metadata table
|
static Writer |
getMetadataTable(TCredentials credentials) |
static void |
moveMetaDeleteMarkers(Instance instance,
TCredentials creds)
During an upgrade from Accumulo 1.4 -> 1.5, we need to move deletion requests for files under the !METADATA table to the root tablet.
|
static void |
putLockID(ZooLock zooLock,
Mutation m) |
static boolean |
recordRootTabletLocation(String address) |
static void |
removeBulkLoadEntries(Connector conn,
String tableId,
long tid) |
static void |
removeBulkLoadInProgressFlag(String path) |
static void |
removeScanFiles(KeyExtent extent,
Set<String> scanFiles,
TCredentials credentials,
ZooLock zooLock) |
static void |
removeUnusedWALEntries(KeyExtent extent,
List<MetadataTable.LogEntry> logEntries,
ZooLock zooLock) |
static void |
replaceDatafiles(KeyExtent extent,
Set<String> datafilesToDelete,
Set<String> scanFiles,
String path,
Long compactionId,
MetadataTable.DataFileValue size,
TCredentials credentials,
String address,
TServerInstance lastLocation,
ZooLock zooLock) |
static void |
replaceDatafiles(KeyExtent extent,
Set<String> datafilesToDelete,
Set<String> scanFiles,
String path,
Long compactionId,
MetadataTable.DataFileValue size,
TCredentials credentials,
String address,
TServerInstance lastLocation,
ZooLock zooLock,
boolean insertDeleteFlags) |
static void |
rollBackSplit(org.apache.hadoop.io.Text metadataEntry,
org.apache.hadoop.io.Text oldPrevEndRow,
TCredentials credentials,
ZooLock zooLock) |
static void |
splitDatafiles(org.apache.hadoop.io.Text table,
org.apache.hadoop.io.Text midRow,
double splitRatio,
Map<String,FileUtil.FileInfo> firstAndLastRows,
SortedMap<String,MetadataTable.DataFileValue> datafiles,
SortedMap<String,MetadataTable.DataFileValue> lowDatafileSizes,
SortedMap<String,MetadataTable.DataFileValue> highDatafileSizes,
List<String> highDatafilesToRemove) |
static void |
splitTablet(KeyExtent extent,
org.apache.hadoop.io.Text oldPrevEndRow,
double splitRatio,
TCredentials credentials,
ZooLock zooLock) |
static void |
update(TCredentials credentials,
Mutation m) |
static void |
update(TCredentials credentials,
ZooLock zooLock,
Mutation m) |
static void |
updateTabletCompactID(KeyExtent extent,
long compactID,
TCredentials credentials,
ZooLock zooLock) |
static void |
updateTabletDataFile(KeyExtent extent,
String path,
String mergeFile,
MetadataTable.DataFileValue dfv,
String time,
TCredentials credentials,
Set<String> filesInUseByScans,
String address,
ZooLock zooLock,
Set<String> unusedWalLogs,
TServerInstance lastLocation,
long flushId)
new data file update function adds one data file to a tablet's list
|
static void |
updateTabletDataFile(long tid,
KeyExtent extent,
Map<String,MetadataTable.DataFileValue> estSizes,
String time,
TCredentials credentials,
ZooLock zooLock) |
static void |
updateTabletFlushID(KeyExtent extent,
long flushID,
TCredentials credentials,
ZooLock zooLock) |
static void |
updateTabletPrevEndRow(KeyExtent extent,
TCredentials credentials) |
getEntries, getMetadataLocationEntries, getTabletEntries, isContiguousRange, validateEntries
public static Writer getMetadataTable(TCredentials credentials)
public static void update(TCredentials credentials, Mutation m)
public static void update(TCredentials credentials, ZooLock zooLock, Mutation m)
public static void updateTabletDataFile(KeyExtent extent, String path, String mergeFile, MetadataTable.DataFileValue dfv, String time, TCredentials credentials, Set<String> filesInUseByScans, String address, ZooLock zooLock, Set<String> unusedWalLogs, TServerInstance lastLocation, long flushId)
path
- should be relative to the table directory
public static void updateTabletFlushID(KeyExtent extent, long flushID, TCredentials credentials, ZooLock zooLock)
public static void updateTabletCompactID(KeyExtent extent, long compactID, TCredentials credentials, ZooLock zooLock)
public static void updateTabletDataFile(long tid, KeyExtent extent, Map<String,MetadataTable.DataFileValue> estSizes, String time, TCredentials credentials, ZooLock zooLock)
public static void addTablet(KeyExtent extent, String path, TCredentials credentials, char timeType, ZooLock lock)
public static void updateTabletPrevEndRow(KeyExtent extent, TCredentials credentials)
public static SortedMap<KeyExtent,org.apache.hadoop.io.Text> getMetadataDirectoryEntries(SortedMap<Key,Value> entries)
public static boolean recordRootTabletLocation(String address)
public static SortedMap<String,MetadataTable.DataFileValue> getDataFileSizes(KeyExtent extent, TCredentials credentials)
public static void addNewTablet(KeyExtent extent, String path, TServerInstance location, Map<String,MetadataTable.DataFileValue> datafileSizes, Map<String,Long> bulkLoadedFiles, TCredentials credentials, String time, long lastFlushID, long lastCompactID, ZooLock zooLock)
public static void rollBackSplit(org.apache.hadoop.io.Text metadataEntry, org.apache.hadoop.io.Text oldPrevEndRow, TCredentials credentials, ZooLock zooLock)
public static void splitTablet(KeyExtent extent, org.apache.hadoop.io.Text oldPrevEndRow, double splitRatio, TCredentials credentials, ZooLock zooLock)
public static void finishSplit(org.apache.hadoop.io.Text metadataEntry, Map<String,MetadataTable.DataFileValue> datafileSizes, List<String> highDatafilesToRemove, TCredentials credentials, ZooLock zooLock)
public static void finishSplit(KeyExtent extent, Map<String,MetadataTable.DataFileValue> datafileSizes, List<String> highDatafilesToRemove, TCredentials credentials, ZooLock zooLock)
public static void replaceDatafiles(KeyExtent extent, Set<String> datafilesToDelete, Set<String> scanFiles, String path, Long compactionId, MetadataTable.DataFileValue size, TCredentials credentials, String address, TServerInstance lastLocation, ZooLock zooLock)
public static void replaceDatafiles(KeyExtent extent, Set<String> datafilesToDelete, Set<String> scanFiles, String path, Long compactionId, MetadataTable.DataFileValue size, TCredentials credentials, String address, TServerInstance lastLocation, ZooLock zooLock, boolean insertDeleteFlags)
public static void addDeleteEntries(KeyExtent extent, Set<String> datafilesToDelete, TCredentials credentials)
public static Mutation createDeleteMutation(String tableId, String pathToRemove)
public static void removeScanFiles(KeyExtent extent, Set<String> scanFiles, TCredentials credentials, ZooLock zooLock)
public static void splitDatafiles(org.apache.hadoop.io.Text table, org.apache.hadoop.io.Text midRow, double splitRatio, Map<String,FileUtil.FileInfo> firstAndLastRows, SortedMap<String,MetadataTable.DataFileValue> datafiles, SortedMap<String,MetadataTable.DataFileValue> lowDatafileSizes, SortedMap<String,MetadataTable.DataFileValue> highDatafileSizes, List<String> highDatafilesToRemove)
public static KeyExtent fixSplit(org.apache.hadoop.io.Text metadataEntry, SortedMap<ColumnFQ,Value> columns, TServerInstance tserver, TCredentials credentials, ZooLock lock) throws AccumuloException
AccumuloException
public static void deleteTable(String tableId, boolean insertDeletes, TCredentials credentials, ZooLock lock) throws AccumuloException
AccumuloException
public static void addLogEntry(TCredentials credentials, MetadataTable.LogEntry entry, ZooLock zooLock)
public static MetadataTable.LogEntry entryFromKeyValue(Key key, Value value)
public static Pair<List<MetadataTable.LogEntry>,SortedMap<String,MetadataTable.DataFileValue>> getFileAndLogEntries(TCredentials credentials, KeyExtent extent) throws org.apache.zookeeper.KeeperException, InterruptedException, IOException
org.apache.zookeeper.KeeperException
InterruptedException
IOException
public static List<MetadataTable.LogEntry> getLogEntries(TCredentials credentials, KeyExtent extent) throws IOException, org.apache.zookeeper.KeeperException, InterruptedException
IOException
org.apache.zookeeper.KeeperException
InterruptedException
public static Iterator<MetadataTable.LogEntry> getLogEntries(TCredentials creds) throws IOException, org.apache.zookeeper.KeeperException, InterruptedException
IOException
org.apache.zookeeper.KeeperException
InterruptedException
public static void removeUnusedWALEntries(KeyExtent extent, List<MetadataTable.LogEntry> logEntries, ZooLock zooLock)
public static void cloneTable(Instance instance, String srcTableId, String tableId) throws Exception
Exception
public static void removeBulkLoadEntries(Connector conn, String tableId, long tid) throws Exception
Exception
public static List<String> getBulkFilesLoaded(Connector conn, KeyExtent extent, long tid)
public static Map<String,Long> getBulkFilesLoaded(TCredentials credentials, KeyExtent extent)
public static Map<String,Long> getBulkFilesLoaded(TCredentials credentials, org.apache.hadoop.io.Text metadataRow)
public static void addBulkLoadInProgressFlag(String path)
public static void removeBulkLoadInProgressFlag(String path)
public static void moveMetaDeleteMarkers(Instance instance, TCredentials creds)
Copyright © 2015 Apache Accumulo Project. All Rights Reserved.