public abstract class AbstractStorageManager extends Object
Modifier and Type | Field and Description
---|---
protected static Map<String,Class<? extends FileAppender>> | APPENDER_HANDLER_CACHE: Cache of appender handlers for each storage type.
protected boolean | blocksMetadataEnabled
protected TajoConf | conf
protected org.apache.hadoop.fs.FileSystem | fs
protected static Map<String,Class<? extends Scanner>> | SCANNER_HANDLER_CACHE: Cache of scanner handlers for each storage type.
protected org.apache.hadoop.fs.Path | tableBaseDir
Modifier | Constructor and Description
---|---
protected | AbstractStorageManager(TajoConf conf)
Modifier and Type | Method and Description
---|---
long | calculateSize(org.apache.hadoop.fs.Path tablePath)
void | delete(org.apache.hadoop.fs.Path tablePath)
void | deleteData(org.apache.hadoop.fs.Path path): This method deletes only data contained in the given path.
boolean | exists(org.apache.hadoop.fs.Path path)
Appender | getAppender(TableMeta meta, Schema schema, org.apache.hadoop.fs.Path path)
protected int | getBlockIndex(org.apache.hadoop.fs.BlockLocation[] blkLocations, long offset)
Scanner | getFileScanner(TableMeta meta, Schema schema, org.apache.hadoop.fs.Path path)
Scanner | getFileScanner(TableMeta meta, Schema schema, org.apache.hadoop.fs.Path path, org.apache.hadoop.fs.FileStatus status)
org.apache.hadoop.fs.FileSystem | getFileSystem()
long | getMinSplitSize(): Get the minimum split size.
Scanner | getScanner(TableMeta meta, Schema schema, CatalogProtos.FragmentProto fragment)
Scanner | getScanner(TableMeta meta, Schema schema, CatalogProtos.FragmentProto fragment, Schema target)
Scanner | getScanner(TableMeta meta, Schema schema, Fragment fragment)
abstract Scanner | getScanner(TableMeta meta, Schema schema, Fragment fragment, Schema target)
abstract Class<? extends Scanner> | getScannerClass(CatalogProtos.StoreType storeType)
List<FileFragment> | getSplits(String tableName, TableMeta meta, Schema schema, org.apache.hadoop.fs.Path... inputs): Generate the list of files and make them into FileFragments.
TableMeta | getTableMeta(org.apache.hadoop.fs.Path tablePath)
org.apache.hadoop.fs.Path | getTablePath(String tableName)
org.apache.hadoop.fs.Path | getWarehouseDir()
protected boolean | isSplittable(TableMeta meta, Schema schema, org.apache.hadoop.fs.Path path, org.apache.hadoop.fs.FileStatus status): Is the given file splittable? Usually true, but if the file is stream compressed, it will not be.
protected List<org.apache.hadoop.fs.FileStatus> | listStatus(org.apache.hadoop.fs.Path... dirs): List input directories.
protected FileFragment | makeNonSplit(String fragmentId, org.apache.hadoop.fs.Path file, long start, long length, org.apache.hadoop.fs.BlockLocation[] blkLocations)
protected FileFragment | makeSplit(String fragmentId, org.apache.hadoop.fs.Path file, org.apache.hadoop.fs.BlockLocation blockLocation)
protected FileFragment | makeSplit(String fragmentId, org.apache.hadoop.fs.Path file, long start, long length): A factory that makes the split for this class.
protected FileFragment | makeSplit(String fragmentId, org.apache.hadoop.fs.Path file, long start, long length, String[] hosts)
static <T> T | newAppenderInstance(Class<T> theClass, org.apache.hadoop.conf.Configuration conf, TableMeta meta, Schema schema, org.apache.hadoop.fs.Path path): Create an appender instance.
static <T> T | newScannerInstance(Class<T> theClass, org.apache.hadoop.conf.Configuration conf, Schema schema, TableMeta meta, Fragment fragment): Create a scanner instance.
FileFragment[] | split(org.apache.hadoop.fs.Path tablePath)
FileFragment[] | split(String tableName)
FileFragment[] | split(String tableName, long fragmentSize)
FileFragment[] | split(String tableName, org.apache.hadoop.fs.Path tablePath)
FileFragment[] | splitBroadcastTable(org.apache.hadoop.fs.Path tablePath)
static FileFragment[] | splitNG(org.apache.hadoop.conf.Configuration conf, String tableName, TableMeta meta, org.apache.hadoop.fs.Path tablePath, long size)
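Most of the methods above resolve a storage-format-specific Scanner or Appender for a table path or fragment. As a quick orientation, the sketch below reads one data file through getFileScanner(...). It is a minimal sketch, not code from this class: the package names in the imports follow the Tajo 0.8-era layout, and the init()/next()/close() lifecycle of Scanner (with next() returning null at end of data) is an assumption to verify against the Scanner Javadoc.

```java
import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.tajo.catalog.Schema;
import org.apache.tajo.catalog.TableMeta;
import org.apache.tajo.storage.AbstractStorageManager;
import org.apache.tajo.storage.Scanner;

public class FileScanExample {

  // Counts the tuples in a single data file of a table.
  // 'sm', 'meta', 'schema', and 'path' are assumed to come from the caller's context.
  static long countTuples(AbstractStorageManager sm, TableMeta meta, Schema schema, Path path)
      throws IOException {
    Scanner scanner = sm.getFileScanner(meta, schema, path);
    scanner.init();                      // assumed lifecycle: init -> next ... -> close
    long count = 0;
    try {
      while (scanner.next() != null) {   // next() is assumed to return null when exhausted
        count++;
      }
    } finally {
      scanner.close();
    }
    return count;
  }
}
```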
protected final TajoConf conf
protected final org.apache.hadoop.fs.FileSystem fs
protected final org.apache.hadoop.fs.Path tableBaseDir
protected final boolean blocksMetadataEnabled
protected static final Map<String,Class<? extends Scanner>> SCANNER_HANDLER_CACHE
protected static final Map<String,Class<? extends FileAppender>> APPENDER_HANDLER_CACHE
protected AbstractStorageManager(TajoConf conf) throws IOException
Throws: IOException
public abstract Class<? extends Scanner> getScannerClass(CatalogProtos.StoreType storeType) throws IOException
Throws: IOException
public abstract Scanner getScanner(TableMeta meta, Schema schema, Fragment fragment, Schema target) throws IOException
Throws: IOException
public Scanner getFileScanner(TableMeta meta, Schema schema, org.apache.hadoop.fs.Path path) throws IOException
Throws: IOException
public Scanner getFileScanner(TableMeta meta, Schema schema, org.apache.hadoop.fs.Path path, org.apache.hadoop.fs.FileStatus status) throws IOException
Throws: IOException
public Scanner getScanner(TableMeta meta, Schema schema, CatalogProtos.FragmentProto fragment) throws IOException
Throws: IOException
public Scanner getScanner(TableMeta meta, Schema schema, CatalogProtos.FragmentProto fragment, Schema target) throws IOException
Throws: IOException
public Scanner getScanner(TableMeta meta, Schema schema, Fragment fragment) throws IOException
Throws: IOException
public org.apache.hadoop.fs.FileSystem getFileSystem()
public org.apache.hadoop.fs.Path getWarehouseDir()
public void delete(org.apache.hadoop.fs.Path tablePath) throws IOException
Throws: IOException
public boolean exists(org.apache.hadoop.fs.Path path) throws IOException
Throws: IOException
public void deleteData(org.apache.hadoop.fs.Path path) throws IOException
Parameters: path - The path in which data are deleted.
Throws: IOException
public org.apache.hadoop.fs.Path getTablePath(String tableName)
public Appender getAppender(TableMeta meta, Schema schema, org.apache.hadoop.fs.Path path) throws IOException
Throws: IOException
public TableMeta getTableMeta(org.apache.hadoop.fs.Path tablePath) throws IOException
Throws: IOException
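getAppender(...) is the write-side counterpart of the scanner methods: it returns an Appender bound to the table's meta, schema, and output path. The following is a minimal sketch, not code from this class; the init()/addTuple()/flush()/close() lifecycle of Appender and the import package names are assumptions to verify against the Appender Javadoc.

```java
import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.tajo.catalog.Schema;
import org.apache.tajo.catalog.TableMeta;
import org.apache.tajo.storage.AbstractStorageManager;
import org.apache.tajo.storage.Appender;
import org.apache.tajo.storage.Tuple;

public class AppendExample {

  // Writes a batch of tuples to 'outputPath' through the storage manager's Appender.
  static void writeTuples(AbstractStorageManager sm, TableMeta meta, Schema schema,
                          Path outputPath, Iterable<Tuple> tuples) throws IOException {
    Appender appender = sm.getAppender(meta, schema, outputPath);
    appender.init();                     // assumed lifecycle: init -> addTuple* -> flush -> close
    try {
      for (Tuple tuple : tuples) {
        appender.addTuple(tuple);
      }
      appender.flush();
    } finally {
      appender.close();
    }
  }
}
```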
public FileFragment[] split(String tableName) throws IOException
Throws: IOException
public FileFragment[] split(String tableName, long fragmentSize) throws IOException
Throws: IOException
public FileFragment[] splitBroadcastTable(org.apache.hadoop.fs.Path tablePath) throws IOException
Throws: IOException
public FileFragment[] split(org.apache.hadoop.fs.Path tablePath) throws IOException
Throws: IOException
public FileFragment[] split(String tableName, org.apache.hadoop.fs.Path tablePath) throws IOException
Throws: IOException
public static FileFragment[] splitNG(org.apache.hadoop.conf.Configuration conf, String tableName, TableMeta meta, org.apache.hadoop.fs.Path tablePath, long size) throws IOException
Throws: IOException
public long calculateSize(org.apache.hadoop.fs.Path tablePath) throws IOException
Throws: IOException
protected List<org.apache.hadoop.fs.FileStatus> listStatus(org.apache.hadoop.fs.Path... dirs) throws IOException
List input directories.
Throws: IOException - if zero items.
protected boolean isSplittable(TableMeta meta, Schema schema, org.apache.hadoop.fs.Path path, org.apache.hadoop.fs.FileStatus status) throws IOException
Is the given file splittable? Usually true, but if the file is stream compressed, it will not be. FileInputFormat implementations can override this and return false to ensure that individual input files are never split up, so that Mappers process entire files.
Parameters: path - the file name to check; status - used to get the file length
Throws: IOException
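As the note above says, an implementation can override isSplittable(...) and return false so that a file is always processed as a whole. The sketch below is hypothetical: SomeConcreteStorageManager stands in for whatever concrete subclass is actually in use, and the file-extension test is only a stand-in for a real compression-codec check.

```java
import java.io.IOException;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.tajo.catalog.Schema;
import org.apache.tajo.catalog.TableMeta;
import org.apache.tajo.conf.TajoConf;

// Hypothetical subclass that never splits stream-compressed files.
public class NonSplittingStorageManager extends SomeConcreteStorageManager {

  public NonSplittingStorageManager(TajoConf conf) throws IOException {
    super(conf);   // mirrors the documented AbstractStorageManager(TajoConf) constructor
  }

  @Override
  protected boolean isSplittable(TableMeta meta, Schema schema, Path path, FileStatus status)
      throws IOException {
    // A real check would consult the compression codec for the file;
    // the extension test here is only an illustration.
    if (path.getName().endsWith(".gz")) {
      return false;   // stream compressed: hand the whole file to one scanner
    }
    return super.isSplittable(meta, schema, path, status);
  }
}
```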
protected int getBlockIndex(org.apache.hadoop.fs.BlockLocation[] blkLocations, long offset)
protected FileFragment makeSplit(String fragmentId, org.apache.hadoop.fs.Path file, long start, long length)
protected FileFragment makeSplit(String fragmentId, org.apache.hadoop.fs.Path file, long start, long length, String[] hosts)
protected FileFragment makeSplit(String fragmentId, org.apache.hadoop.fs.Path file, org.apache.hadoop.fs.BlockLocation blockLocation) throws IOException
Throws: IOException
protected FileFragment makeNonSplit(String fragmentId, org.apache.hadoop.fs.Path file, long start, long length, org.apache.hadoop.fs.BlockLocation[] blkLocations) throws IOException
Throws: IOException
public long getMinSplitSize()
public List<FileFragment> getSplits(String tableName, TableMeta meta, Schema schema, org.apache.hadoop.fs.Path... inputs) throws IOException
Throws: IOException
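getSplits(...) is the usual entry point for fragment-based reads: it turns a table's input paths into FileFragments, each of which can be handed to getScanner(...). A hedged sketch along the same lines as the file-scan example above, with the same assumptions about the Scanner lifecycle and package names:

```java
import java.io.IOException;
import java.util.List;

import org.apache.hadoop.fs.Path;
import org.apache.tajo.catalog.Schema;
import org.apache.tajo.catalog.TableMeta;
import org.apache.tajo.storage.AbstractStorageManager;
import org.apache.tajo.storage.Scanner;
import org.apache.tajo.storage.fragment.FileFragment;

public class FragmentScanExample {

  // Splits a table into fragments and scans each fragment independently.
  static void scanByFragments(AbstractStorageManager sm, String tableName,
                              TableMeta meta, Schema schema, Path tablePath) throws IOException {
    List<FileFragment> fragments = sm.getSplits(tableName, meta, schema, tablePath);
    for (FileFragment fragment : fragments) {
      Scanner scanner = sm.getScanner(meta, schema, fragment);
      scanner.init();
      try {
        while (scanner.next() != null) {
          // process each tuple of the fragment ...
        }
      } finally {
        scanner.close();
      }
    }
  }
}
```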
public static <T> T newScannerInstance(Class<T> theClass, org.apache.hadoop.conf.Configuration conf, Schema schema, TableMeta meta, Fragment fragment)
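newScannerInstance(...) and newAppenderInstance(...) are static reflective factories: given an implementation class (for example the one returned by getScannerClass(storeType)), they construct an instance wired to the supplied configuration, schema, meta, and fragment or path. A hypothetical pairing of the two calls is sketched below; the import package names and the surrounding wiring are assumptions, not code from this class.

```java
import java.io.IOException;

import org.apache.tajo.catalog.Schema;
import org.apache.tajo.catalog.TableMeta;
import org.apache.tajo.catalog.proto.CatalogProtos;
import org.apache.tajo.conf.TajoConf;
import org.apache.tajo.storage.AbstractStorageManager;
import org.apache.tajo.storage.Scanner;
import org.apache.tajo.storage.fragment.Fragment;

public class ReflectiveScannerExample {

  // Resolves the Scanner class registered for a store type and instantiates it reflectively.
  static Scanner createScanner(AbstractStorageManager sm, TajoConf conf,
                               CatalogProtos.StoreType storeType,
                               TableMeta meta, Schema schema, Fragment fragment)
      throws IOException {
    Class<? extends Scanner> scannerClass = sm.getScannerClass(storeType);
    // newScannerInstance is the static factory documented above; TajoConf is assumed
    // to be a Hadoop Configuration, so it satisfies the conf parameter.
    return AbstractStorageManager.newScannerInstance(scannerClass, conf, schema, meta, fragment);
  }
}
```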
Copyright © 2014 Apache Software Foundation. All Rights Reserved.