org.apache.accumulo.server.util
Class MetadataTable
java.lang.Object
org.apache.accumulo.core.util.MetadataTable
org.apache.accumulo.server.util.MetadataTable
public class MetadataTable
extends org.apache.accumulo.core.util.MetadataTable
Method Summary

static void  addBulkLoadInProgressFlag(String path)
static void  addDeleteEntries(KeyExtent extent, Set<String> datafilesToDelete, AuthInfo credentials)
static void  addDeleteEntry(String tableId, String path)
static void  addLogEntries(AuthInfo credentials, List<MetadataTable.LogEntry> entries, ZooLock zooLock)
static void  addNewTablet(KeyExtent extent, String path, TServerInstance location, Map<String,MetadataTable.DataFileValue> datafileSizes, Map<String,Long> bulkLoadedFiles, AuthInfo credentials, String time, long lastFlushID, long lastCompactID, ZooLock zooLock)
static void  addTablet(KeyExtent extent, String path, AuthInfo credentials, char timeType, ZooLock lock)
static void  chopped(KeyExtent extent, ZooLock zooLock)
static void  cloneTable(Instance instance, String srcTableId, String tableId)
static Mutation  createDeleteMutation(String tableId, String pathToRemove)
static void  deleteTable(String tableId, boolean insertDeletes, AuthInfo credentials, ZooLock lock)
static MetadataTable.LogEntry  entryFromKeyValue(Key key, Value value)
static void  finishSplit(KeyExtent extent, Map<String,MetadataTable.DataFileValue> datafileSizes, List<String> highDatafilesToRemove, AuthInfo credentials, ZooLock zooLock)
static void  finishSplit(org.apache.hadoop.io.Text metadataEntry, Map<String,MetadataTable.DataFileValue> datafileSizes, List<String> highDatafilesToRemove, AuthInfo credentials, ZooLock zooLock)
static KeyExtent  fixSplit(org.apache.hadoop.io.Text metadataEntry, SortedMap<ColumnFQ,Value> columns, TServerInstance tserver, AuthInfo credentials, ZooLock lock)
static boolean  getBatchFromRootTablet(AuthInfo credentials, org.apache.hadoop.io.Text startRow, SortedMap<Key,Value> results, SortedSet<Column> columns, boolean skipStartRow, int size)
static Map<String,Long>  getBulkFilesLoaded(AuthInfo credentials, KeyExtent extent)
static Map<String,Long>  getBulkFilesLoaded(AuthInfo credentials, org.apache.hadoop.io.Text metadataRow)
static List<String>  getBulkFilesLoaded(Connector conn, KeyExtent extent, long tid)
static SortedMap<String,MetadataTable.DataFileValue>  getDataFileSizes(KeyExtent extent, AuthInfo credentials)
static Pair<List<MetadataTable.LogEntry>,SortedMap<String,MetadataTable.DataFileValue>>  getFileAndLogEntries(AuthInfo credentials, KeyExtent extent)
static Iterator<MetadataTable.LogEntry>  getLogEntries(AuthInfo creds)
static List<MetadataTable.LogEntry>  getLogEntries(AuthInfo credentials, KeyExtent extent)
static SortedMap<KeyExtent,org.apache.hadoop.io.Text>  getMetadataDirectoryEntries(SortedMap<Key,Value> entries)
    Convenience method for reading entries from the metadata table.
static Writer  getMetadataTable(AuthInfo credentials)
static void  getTabletAndPrevTabletKeyValues(SortedMap<Key,Value> tkv, KeyExtent ke, List<ColumnFQ> columns, AuthInfo credentials)
static SortedMap<org.apache.hadoop.io.Text,SortedMap<ColumnFQ,Value>>  getTabletEntries(KeyExtent ke, List<ColumnFQ> columns, AuthInfo credentials)
static void  putLockID(ZooLock zooLock, Mutation m)
static boolean  recordRootTabletLocation(String address)
static void  removeBulkLoadEntries(Connector conn, String tableId, long tid)
static void  removeBulkLoadInProgressFlag(String path)
static void  removeScanFiles(KeyExtent extent, Set<String> scanFiles, AuthInfo credentials, ZooLock zooLock)
static void  removeUnusedWALEntries(KeyExtent extent, List<MetadataTable.LogEntry> logEntries, ZooLock zooLock)
static void  replaceDatafiles(KeyExtent extent, Set<String> datafilesToDelete, Set<String> scanFiles, String path, Long compactionId, MetadataTable.DataFileValue size, AuthInfo credentials, String address, TServerInstance lastLocation, ZooLock zooLock)
static void  replaceDatafiles(KeyExtent extent, Set<String> datafilesToDelete, Set<String> scanFiles, String path, Long compactionId, MetadataTable.DataFileValue size, AuthInfo credentials, String address, TServerInstance lastLocation, ZooLock zooLock, boolean insertDeleteFlags)
static void  splitDatafiles(org.apache.hadoop.io.Text table, org.apache.hadoop.io.Text midRow, double splitRatio, Map<String,FileUtil.FileInfo> firstAndLastRows, SortedMap<String,MetadataTable.DataFileValue> datafiles, SortedMap<String,MetadataTable.DataFileValue> lowDatafileSizes, SortedMap<String,MetadataTable.DataFileValue> highDatafileSizes, List<String> highDatafilesToRemove)
static void  splitTablet(KeyExtent extent, org.apache.hadoop.io.Text oldPrevEndRow, double splitRatio, AuthInfo credentials, ZooLock zooLock)
static void  update(AuthInfo credentials, Mutation m)
static void  update(AuthInfo credentials, ZooLock zooLock, Mutation m)
static void  updateTabletCompactID(KeyExtent extent, long compactID, AuthInfo credentials, ZooLock zooLock)
static void  updateTabletDataFile(KeyExtent extent, String path, String mergeFile, MetadataTable.DataFileValue dfv, String time, AuthInfo credentials, Set<String> filesInUseByScans, String address, ZooLock zooLock, Set<String> unusedWalLogs, TServerInstance lastLocation, long flushId)
    New data file update function; adds one data file to a tablet's list. Path should be relative to the table directory.
static void  updateTabletDataFile(long tid, KeyExtent extent, Map<String,MetadataTable.DataFileValue> estSizes, String time, AuthInfo credentials, ZooLock zooLock)
static void  updateTabletFlushID(KeyExtent extent, long flushID, AuthInfo credentials, ZooLock zooLock)
static void  updateTabletPrevEndRow(KeyExtent extent, AuthInfo credentials)

Methods inherited from class java.lang.Object
clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
Method Detail

getMetadataTable
public static Writer getMetadataTable(AuthInfo credentials)
putLockID
public static void putLockID(ZooLock zooLock,
Mutation m)
update
public static void update(AuthInfo credentials,
Mutation m)
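As a minimal, hedged sketch of applying a single metadata mutation through update(AuthInfo, Mutation): the row and column values below are placeholders, the AuthInfo import path is assumed from the 1.4 package layout, and real callers obtain credentials from the running server rather than constructing them ad hoc.

import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.thrift.AuthInfo;   // assumed package for AuthInfo
import org.apache.accumulo.server.util.MetadataTable;
import org.apache.hadoop.io.Text;

public class MetadataUpdateSketch {
  static void writeSingleEntry(AuthInfo credentials) {
    // Placeholder metadata row and column; real rows are derived from a KeyExtent.
    Mutation m = new Mutation(new Text("2;row"));
    m.put(new Text("srv"), new Text("flush"), new Value("42".getBytes()));
    // update(...) writes the mutation to the metadata table using the given credentials.
    MetadataTable.update(credentials, m);
  }
}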
update
public static void update(AuthInfo credentials,
ZooLock zooLock,
Mutation m)
updateTabletDataFile
public static void updateTabletDataFile(KeyExtent extent,
String path,
String mergeFile,
MetadataTable.DataFileValue dfv,
String time,
AuthInfo credentials,
Set<String> filesInUseByScans,
String address,
ZooLock zooLock,
Set<String> unusedWalLogs,
TServerInstance lastLocation,
long flushId)
- New data file update function; adds one data file to a tablet's list. The path should be relative to the table directory.
- Parameters: time, filesInUseByScans, zooLock, flushId
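A hedged sketch of what a call registering a freshly written data file might look like, following the signature above. Every concrete value (file name, sizes, time string, address, flush id) is illustrative, and the import paths for AuthInfo, ZooLock, and DataFileValue are assumptions based on the 1.4 package layout; in practice these objects come from the running tablet server.

import java.util.Collections;

import org.apache.accumulo.core.data.KeyExtent;
import org.apache.accumulo.core.security.thrift.AuthInfo;          // assumed package
import org.apache.accumulo.core.util.MetadataTable.DataFileValue;  // nested class inherited by this class
import org.apache.accumulo.server.util.MetadataTable;
import org.apache.accumulo.server.zookeeper.ZooLock;               // assumed package

public class RegisterDataFileSketch {
  static void registerMinorCompactionOutput(KeyExtent extent, AuthInfo credentials, ZooLock zooLock) {
    // Illustrative size (bytes) and entry count; assumes a (size, numEntries) constructor.
    DataFileValue dfv = new DataFileValue(1048576L, 50000L);
    MetadataTable.updateTabletDataFile(
        extent,
        "/default_tablet/F0000abc.rf",   // path, relative to the table directory as required
        null,                            // mergeFile: none in this sketch
        dfv,
        "M12345",                        // time string (format assumed)
        credentials,
        Collections.<String>emptySet(),  // filesInUseByScans
        "127.0.0.1:9997",                // tablet server address (placeholder)
        zooLock,
        Collections.<String>emptySet(),  // unusedWalLogs
        null,                            // lastLocation
        42L);                            // flushId
  }
}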
updateTabletFlushID
public static void updateTabletFlushID(KeyExtent extent,
long flushID,
AuthInfo credentials,
ZooLock zooLock)
updateTabletCompactID
public static void updateTabletCompactID(KeyExtent extent,
long compactID,
AuthInfo credentials,
ZooLock zooLock)
updateTabletDataFile
public static void updateTabletDataFile(long tid,
KeyExtent extent,
Map<String,MetadataTable.DataFileValue> estSizes,
String time,
AuthInfo credentials,
ZooLock zooLock)
addTablet
public static void addTablet(KeyExtent extent,
String path,
AuthInfo credentials,
char timeType,
ZooLock lock)
updateTabletPrevEndRow
public static void updateTabletPrevEndRow(KeyExtent extent,
AuthInfo credentials)
getMetadataDirectoryEntries
public static SortedMap<KeyExtent,org.apache.hadoop.io.Text> getMetadataDirectoryEntries(SortedMap<Key,Value> entries)
- Convenience method for reading entries from the metadata table.
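As a usage sketch, the entries argument is typically filled from a scan of the metadata table. The connector setup below and the decision to scan every column are illustrative assumptions, not something this page prescribes; only the final call matches the documented signature.

import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;

import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.KeyExtent;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.server.util.MetadataTable;
import org.apache.hadoop.io.Text;

public class DirectoryEntriesSketch {
  static SortedMap<KeyExtent,Text> readTabletDirectories(Connector conn) throws Exception {
    // Constants.METADATA_TABLE_NAME names the metadata table in 1.4.
    Scanner scanner = conn.createScanner(Constants.METADATA_TABLE_NAME, new Authorizations());
    SortedMap<Key,Value> entries = new TreeMap<Key,Value>();
    for (Map.Entry<Key,Value> e : scanner)
      entries.put(e.getKey(), e.getValue());
    // Convenience method documented above: maps each tablet's extent to its directory.
    return MetadataTable.getMetadataDirectoryEntries(entries);
  }
}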
getBatchFromRootTablet
public static boolean getBatchFromRootTablet(AuthInfo credentials,
org.apache.hadoop.io.Text startRow,
SortedMap<Key,Value> results,
SortedSet<Column> columns,
boolean skipStartRow,
int size)
throws AccumuloSecurityException
- Throws:
AccumuloSecurityException
recordRootTabletLocation
public static boolean recordRootTabletLocation(String address)
getDataFileSizes
public static SortedMap<String,MetadataTable.DataFileValue> getDataFileSizes(KeyExtent extent,
AuthInfo credentials)
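A small hedged sketch of consuming the returned map, for example to total a tablet's on-disk size. The getSize() accessor on DataFileValue is an assumption about the nested class's usual shape (this page only references the type), and the AuthInfo import path is likewise assumed.

import java.util.Map;
import java.util.SortedMap;

import org.apache.accumulo.core.data.KeyExtent;
import org.apache.accumulo.core.security.thrift.AuthInfo;          // assumed package
import org.apache.accumulo.core.util.MetadataTable.DataFileValue;
import org.apache.accumulo.server.util.MetadataTable;

public class TabletSizeSketch {
  static long totalBytes(KeyExtent extent, AuthInfo credentials) {
    SortedMap<String,DataFileValue> sizes = MetadataTable.getDataFileSizes(extent, credentials);
    long total = 0;
    for (Map.Entry<String,DataFileValue> e : sizes.entrySet())
      total += e.getValue().getSize();   // assumed accessor
    return total;
  }
}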
addNewTablet
public static void addNewTablet(KeyExtent extent,
String path,
TServerInstance location,
Map<String,MetadataTable.DataFileValue> datafileSizes,
Map<String,Long> bulkLoadedFiles,
AuthInfo credentials,
String time,
long lastFlushID,
long lastCompactID,
ZooLock zooLock)
splitTablet
public static void splitTablet(KeyExtent extent,
org.apache.hadoop.io.Text oldPrevEndRow,
double splitRatio,
AuthInfo credentials,
ZooLock zooLock)
finishSplit
public static void finishSplit(org.apache.hadoop.io.Text metadataEntry,
Map<String,MetadataTable.DataFileValue> datafileSizes,
List<String> highDatafilesToRemove,
AuthInfo credentials,
ZooLock zooLock)
finishSplit
public static void finishSplit(KeyExtent extent,
Map<String,MetadataTable.DataFileValue> datafileSizes,
List<String> highDatafilesToRemove,
AuthInfo credentials,
ZooLock zooLock)
replaceDatafiles
public static void replaceDatafiles(KeyExtent extent,
Set<String> datafilesToDelete,
Set<String> scanFiles,
String path,
Long compactionId,
MetadataTable.DataFileValue size,
AuthInfo credentials,
String address,
TServerInstance lastLocation,
ZooLock zooLock)
replaceDatafiles
public static void replaceDatafiles(KeyExtent extent,
Set<String> datafilesToDelete,
Set<String> scanFiles,
String path,
Long compactionId,
MetadataTable.DataFileValue size,
AuthInfo credentials,
String address,
TServerInstance lastLocation,
ZooLock zooLock,
boolean insertDeleteFlags)
addDeleteEntries
public static void addDeleteEntries(KeyExtent extent,
Set<String> datafilesToDelete,
AuthInfo credentials)
addDeleteEntry
public static void addDeleteEntry(String tableId,
String path)
createDeleteMutation
public static Mutation createDeleteMutation(String tableId,
String pathToRemove)
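Usage note: createDeleteMutation(...) only builds the mutation; writing it still goes through update(...), as in this hedged sketch. The table id and file path are placeholders, and the AuthInfo import path is assumed. The related addDeleteEntry(String, String) above offers a one-call form.

import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.security.thrift.AuthInfo;   // assumed package
import org.apache.accumulo.server.util.MetadataTable;

public class DeleteEntrySketch {
  static void queueFileForDeletion(AuthInfo credentials) {
    // Placeholder table id and file path (relative paths mirror the other file APIs on this page).
    Mutation m = MetadataTable.createDeleteMutation("2", "/default_tablet/F0000abc.rf");
    MetadataTable.update(credentials, m);
  }
}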
removeScanFiles
public static void removeScanFiles(KeyExtent extent,
Set<String> scanFiles,
AuthInfo credentials,
ZooLock zooLock)
getTabletAndPrevTabletKeyValues
public static void getTabletAndPrevTabletKeyValues(SortedMap<Key,Value> tkv,
KeyExtent ke,
List<ColumnFQ> columns,
AuthInfo credentials)
getTabletEntries
public static SortedMap<org.apache.hadoop.io.Text,SortedMap<ColumnFQ,Value>> getTabletEntries(KeyExtent ke,
List<ColumnFQ> columns,
AuthInfo credentials)
splitDatafiles
public static void splitDatafiles(org.apache.hadoop.io.Text table,
org.apache.hadoop.io.Text midRow,
double splitRatio,
Map<String,FileUtil.FileInfo> firstAndLastRows,
SortedMap<String,MetadataTable.DataFileValue> datafiles,
SortedMap<String,MetadataTable.DataFileValue> lowDatafileSizes,
SortedMap<String,MetadataTable.DataFileValue> highDatafileSizes,
List<String> highDatafilesToRemove)
fixSplit
public static KeyExtent fixSplit(org.apache.hadoop.io.Text metadataEntry,
SortedMap<ColumnFQ,Value> columns,
TServerInstance tserver,
AuthInfo credentials,
ZooLock lock)
throws AccumuloException
- Throws:
AccumuloException
deleteTable
public static void deleteTable(String tableId,
boolean insertDeletes,
AuthInfo credentials,
ZooLock lock)
throws AccumuloException
- Throws:
AccumuloException
addLogEntries
public static void addLogEntries(AuthInfo credentials,
List<MetadataTable.LogEntry> entries,
ZooLock zooLock)
entryFromKeyValue
public static MetadataTable.LogEntry entryFromKeyValue(Key key,
Value value)
getFileAndLogEntries
public static Pair<List<MetadataTable.LogEntry>,SortedMap<String,MetadataTable.DataFileValue>> getFileAndLogEntries(AuthInfo credentials,
KeyExtent extent)
throws org.apache.zookeeper.KeeperException,
InterruptedException,
IOException
- Throws:
org.apache.zookeeper.KeeperException
InterruptedException
IOException
getLogEntries
public static List<MetadataTable.LogEntry> getLogEntries(AuthInfo credentials,
KeyExtent extent)
throws IOException,
org.apache.zookeeper.KeeperException,
InterruptedException
- Throws:
IOException
org.apache.zookeeper.KeeperException
InterruptedException
getLogEntries
public static Iterator<MetadataTable.LogEntry> getLogEntries(AuthInfo creds)
throws IOException,
org.apache.zookeeper.KeeperException,
InterruptedException
- Throws:
IOException
org.apache.zookeeper.KeeperException
InterruptedException
removeUnusedWALEntries
public static void removeUnusedWALEntries(KeyExtent extent,
List<MetadataTable.LogEntry> logEntries,
ZooLock zooLock)
cloneTable
public static void cloneTable(Instance instance,
String srcTableId,
String tableId)
throws Exception
- Throws:
Exception
chopped
public static void chopped(KeyExtent extent,
ZooLock zooLock)
removeBulkLoadEntries
public static void removeBulkLoadEntries(Connector conn,
String tableId,
long tid)
throws Exception
- Throws:
Exception
getBulkFilesLoaded
public static List<String> getBulkFilesLoaded(Connector conn,
KeyExtent extent,
long tid)
getBulkFilesLoaded
public static Map<String,Long> getBulkFilesLoaded(AuthInfo credentials,
KeyExtent extent)
getBulkFilesLoaded
public static Map<String,Long> getBulkFilesLoaded(AuthInfo credentials,
org.apache.hadoop.io.Text metadataRow)
addBulkLoadInProgressFlag
public static void addBulkLoadInProgressFlag(String path)
removeBulkLoadInProgressFlag
public static void removeBulkLoadInProgressFlag(String path)
Copyright © 2012 The Apache Software Foundation. All Rights Reserved.