Packages that use TableDesc
---

| Package | Description |
|---|---|
| org.apache.hadoop.hive.ql.exec | Hive QL execution tasks, operators, functions and other handlers. |
| org.apache.hadoop.hive.ql.exec.persistence | |
| org.apache.hadoop.hive.ql.io | |
| org.apache.hadoop.hive.ql.io.rcfile.merge | |
| org.apache.hadoop.hive.ql.metadata | |
| org.apache.hadoop.hive.ql.optimizer | |
| org.apache.hadoop.hive.ql.plan | |

Uses of TableDesc in org.apache.hadoop.hive.ql.exec
---

**Fields in org.apache.hadoop.hive.ql.exec declared as TableDesc**

| Modifier and Type | Field |
|---|---|
| `static TableDesc` | `Utilities.defaultTd` |

**Fields in org.apache.hadoop.hive.ql.exec with type parameters of type TableDesc**

| Modifier and Type | Field |
|---|---|
| `protected Map<Byte,TableDesc>` | `HashTableSinkOperator.spillTableDesc` |
| `protected Map<Byte,TableDesc>` | `CommonJoinOperator.spillTableDesc` |

**Methods in org.apache.hadoop.hive.ql.exec that return TableDesc**

| Modifier and Type | Method and Description |
|---|---|
| `TableDesc` | `FetchOperator.getCurrTbl()` |
| `static TableDesc` | `JoinUtil.getSpillTableDesc(Byte alias, Map<Byte,TableDesc> spillTableDesc, JoinDesc conf, boolean noFilter)` |
| `TableDesc` | `TableScanOperator.getTableDesc()` |
| `static TableDesc` | `Utilities.getTableDesc(String cols, String colTypes)` |
| `static TableDesc` | `Utilities.getTableDesc(Table tbl)` |
| `TableDesc` | `HashTableSinkOperator.HashTableSinkObjectCtx.getTblDesc()` |
| `TableDesc` | `FetchTask.getTblDesc()` Return the tableDesc of the fetchWork. |
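
Of the helpers above, the two static `Utilities.getTableDesc` overloads are the usual way to synthesize a descriptor outside of plan compilation. A minimal sketch, assuming comma-separated column names and types as the parameter names suggest; the serde wired into the result is an implementation detail of `Utilities`:

```java
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.plan.TableDesc;

public class TableDescFromColumns {
  public static void main(String[] args) {
    // Column names and types as comma-separated strings, per the
    // (String cols, String colTypes) signature above.
    TableDesc td = Utilities.getTableDesc("id,name", "int,string");
    System.out.println(td.getProperties());
  }
}
```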

**Methods in org.apache.hadoop.hive.ql.exec that return types with arguments of type TableDesc**

| Modifier and Type | Method and Description |
|---|---|
| `static Map<Byte,TableDesc>` | `JoinUtil.getSpillTableDesc(Map<Byte,TableDesc> spillTableDesc, JoinDesc conf, boolean noFilter)` |
| `static Map<Byte,TableDesc>` | `JoinUtil.initSpillTables(JoinDesc conf, boolean noFilter)` |
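
A hedged sketch of how the spill-table helpers compose: `initSpillTables` builds the per-alias map from a `JoinDesc`, and the single-alias `getSpillTableDesc` overload reads one entry back out. The `JoinDesc` would come from an existing join operator's configuration; nothing here goes beyond the signatures listed above.

```java
import java.util.Map;

import org.apache.hadoop.hive.ql.exec.JoinUtil;
import org.apache.hadoop.hive.ql.plan.JoinDesc;
import org.apache.hadoop.hive.ql.plan.TableDesc;

public class SpillTableSketch {
  // Resolve the spill TableDesc for one join alias (0 = first input).
  static TableDesc spillDescFor(JoinDesc joinConf, byte alias) {
    Map<Byte, TableDesc> spillTables =
        JoinUtil.initSpillTables(joinConf, /* noFilter = */ false);
    return JoinUtil.getSpillTableDesc(alias, spillTables, joinConf, false);
  }
}
```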

**Methods in org.apache.hadoop.hive.ql.exec with parameters of type TableDesc**

| Modifier and Type | Method and Description |
|---|---|
| `static void` | `Utilities.copyTableJobPropertiesToConf(TableDesc tbl, org.apache.hadoop.mapred.JobConf job)` Copies the storage handler properties configured for a table descriptor to a runtime job configuration. |
| `static PartitionDesc` | `Utilities.getPartitionDescFromTableDesc(TableDesc tblDesc, Partition part)` |
| `void` | `FetchOperator.setCurrTbl(TableDesc currTbl)` |
| `void` | `TableScanOperator.setTableDesc(TableDesc tableDesc)` |
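
`copyTableJobPropertiesToConf` is the job-side counterpart of the storage-handler hooks in `org.apache.hadoop.hive.ql.metadata` below. A minimal sketch of calling it while preparing a job, assuming the `TableDesc` and `JobConf` already exist:

```java
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.plan.TableDesc;
import org.apache.hadoop.mapred.JobConf;

public class JobPropsSketch {
  // Push the table's storage-handler properties into the job configuration
  // so the table's InputFormat/OutputFormat can see them at runtime.
  static void prepare(TableDesc tbl, JobConf job) {
    Utilities.copyTableJobPropertiesToConf(tbl, job);
  }
}
```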

**Method parameters in org.apache.hadoop.hive.ql.exec with type arguments of type TableDesc**

| Modifier and Type | Method and Description |
|---|---|
| `static RowContainer` | `JoinUtil.getRowContainer(org.apache.hadoop.conf.Configuration hconf, List<ObjectInspector> structFieldObjectInspectors, Byte alias, int containerSize, Map<Byte,TableDesc> spillTableDesc, JoinDesc conf, boolean noFilter, org.apache.hadoop.mapred.Reporter reporter)` |
| `static SerDe` | `JoinUtil.getSpillSerDe(byte alias, Map<Byte,TableDesc> spillTableDesc, JoinDesc conf, boolean noFilter)` |
| `static TableDesc` | `JoinUtil.getSpillTableDesc(Byte alias, Map<Byte,TableDesc> spillTableDesc, JoinDesc conf, boolean noFilter)` |
| `static Map<Byte,TableDesc>` | `JoinUtil.getSpillTableDesc(Map<Byte,TableDesc> spillTableDesc, JoinDesc conf, boolean noFilter)` |

**Constructors in org.apache.hadoop.hive.ql.exec with parameters of type TableDesc**

| Constructor |
|---|
| `HashTableSinkOperator.HashTableSinkObjectCtx(ObjectInspector standardOI, SerDe serde, TableDesc tblDesc, org.apache.hadoop.conf.Configuration conf)` |

Uses of TableDesc in org.apache.hadoop.hive.ql.exec.persistence
---

**Methods in org.apache.hadoop.hive.ql.exec.persistence with parameters of type TableDesc**

| Modifier and Type | Method and Description |
|---|---|
| `void` | `RowContainer.setTableDesc(TableDesc tblDesc)` |

Uses of TableDesc in org.apache.hadoop.hive.ql.io
---

**Methods in org.apache.hadoop.hive.ql.io with parameters of type TableDesc**

| Modifier and Type | Method and Description |
|---|---|
| `static FileSinkOperator.RecordWriter` | `HiveFileFormatUtils.getHiveRecordWriter(org.apache.hadoop.mapred.JobConf jc, TableDesc tableInfo, Class<? extends org.apache.hadoop.io.Writable> outputClass, FileSinkDesc conf, org.apache.hadoop.fs.Path outPath, org.apache.hadoop.mapred.Reporter reporter)` |
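
A sketch of obtaining a writer through this helper; the `TableDesc` carries the serde and file-format configuration that select the concrete `RecordWriter`. The `throws Exception` clause is a hedge over whichever checked exceptions the real method declares, and `Text.class` stands in for the table's actual Writable class:

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
import org.apache.hadoop.hive.ql.plan.TableDesc;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Reporter;

public class RecordWriterSketch {
  static FileSinkOperator.RecordWriter openWriter(JobConf jc,
      TableDesc tableInfo, FileSinkDesc sinkDesc, Path outPath)
      throws Exception {
    // The TableDesc selects the output format and serde; Reporter.NULL
    // is acceptable outside a live task.
    return HiveFileFormatUtils.getHiveRecordWriter(
        jc, tableInfo, Text.class, sinkDesc, outPath, Reporter.NULL);
  }
}
```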

Uses of TableDesc in org.apache.hadoop.hive.ql.io.rcfile.merge
---

**Methods in org.apache.hadoop.hive.ql.io.rcfile.merge with parameters of type TableDesc**

| Modifier and Type | Method and Description |
|---|---|
| `void` | `MergeWork.resolveDynamicPartitionMerge(HiveConf conf, org.apache.hadoop.fs.Path path, TableDesc tblDesc, ArrayList<String> aliases, PartitionDesc partDesc)` |

Uses of TableDesc in org.apache.hadoop.hive.ql.metadata
---

**Methods in org.apache.hadoop.hive.ql.metadata with parameters of type TableDesc**

| Modifier and Type | Method and Description |
|---|---|
| `void` | `HiveStorageHandler.configureInputJobProperties(TableDesc tableDesc, Map<String,String> jobProperties)` This method is called to give the StorageHandler a chance to populate JobContext.getConfiguration() with properties that may be needed by the handler's bundled artifacts (i.e., InputFormat, SerDe, etc.). |
| `void` | `DefaultStorageHandler.configureInputJobProperties(TableDesc tableDesc, Map<String,String> jobProperties)` |
| `void` | `HiveStorageHandler.configureOutputJobProperties(TableDesc tableDesc, Map<String,String> jobProperties)` This method is called to give the StorageHandler a chance to populate JobContext.getConfiguration() with properties that may be needed by the handler's bundled artifacts (i.e., InputFormat, SerDe, etc.). |
| `void` | `DefaultStorageHandler.configureOutputJobProperties(TableDesc tableDesc, Map<String,String> jobProperties)` |
| `void` | `HiveStorageHandler.configureTableJobProperties(TableDesc tableDesc, Map<String,String> jobProperties)` Deprecated. |
| `void` | `DefaultStorageHandler.configureTableJobProperties(TableDesc tableDesc, Map<String,String> jobProperties)` |
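
As a minimal sketch of the input-side hook, a handler extending `DefaultStorageHandler` can copy a table property into the job properties so its bundled `InputFormat` sees it at runtime. The property key here is purely illustrative, not part of any real handler:

```java
import java.util.Map;

import org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler;
import org.apache.hadoop.hive.ql.plan.TableDesc;

public class ExampleStorageHandler extends DefaultStorageHandler {
  @Override
  public void configureInputJobProperties(TableDesc tableDesc,
      Map<String, String> jobProperties) {
    // Hypothetical property: forward the table-level setting to the job
    // so the handler's InputFormat can read it from the configuration.
    String endpoint =
        tableDesc.getProperties().getProperty("example.endpoint");
    if (endpoint != null) {
      jobProperties.put("example.endpoint", endpoint);
    }
  }
}
```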

Uses of TableDesc in org.apache.hadoop.hive.ql.optimizer
---

**Methods in org.apache.hadoop.hive.ql.optimizer that return TableDesc**

| Modifier and Type | Method and Description |
|---|---|
| `TableDesc` | `GenMRProcContext.GenMRMapJoinCtx.getTTDesc()` |

**Methods in org.apache.hadoop.hive.ql.optimizer that return types with arguments of type TableDesc**

| Modifier and Type | Method and Description |
|---|---|
| `List<TableDesc>` | `GenMRProcContext.GenMRUnionCtx.getTTDesc()` |

**Methods in org.apache.hadoop.hive.ql.optimizer with parameters of type TableDesc**

| Modifier and Type | Method and Description |
|---|---|
| `void` | `GenMRProcContext.GenMRUnionCtx.addTTDesc(TableDesc tt_desc)` |
| `static void` | `GenMapRedUtils.setTaskPlan(String path, String alias, Operator<? extends OperatorDesc> topOp, MapredWork plan, boolean local, TableDesc tt_desc)` Set the current task in the MapredWork. |
| `void` | `GenMRProcContext.GenMRMapJoinCtx.setTTDesc(TableDesc tt_desc)` |

**Constructors in org.apache.hadoop.hive.ql.optimizer with parameters of type TableDesc**

| Constructor |
|---|
| `GenMRProcContext.GenMRMapJoinCtx(String taskTmpDir, TableDesc tt_desc, Operator<? extends OperatorDesc> rootMapJoinOp, AbstractMapJoinOperator<? extends MapJoinDesc> oldMapJoin)` |

Uses of TableDesc in org.apache.hadoop.hive.ql.plan
---

**Methods in org.apache.hadoop.hive.ql.plan that return TableDesc**

| Modifier and Type | Method and Description |
|---|---|
| `static TableDesc` | `PlanUtils.getDefaultQueryOutputTableDesc(String cols, String colTypes, String fileFormat)` |
| `static TableDesc` | `PlanUtils.getDefaultTableDesc(String separatorCode)` Generate the table descriptor of MetadataTypedColumnsetSerDe with the separatorCode. |
| `static TableDesc` | `PlanUtils.getDefaultTableDesc(String separatorCode, String columns)` Generate the table descriptor of MetadataTypedColumnsetSerDe with the separatorCode and column names (comma-separated string). |
| `static TableDesc` | `PlanUtils.getDefaultTableDesc(String separatorCode, String columns, boolean lastColumnTakesRestOfTheLine)` Generate the table descriptor of MetadataTypedColumnsetSerDe with the separatorCode and column names (comma-separated string), and whether the last column should take the rest of the line. |
| `static TableDesc` | `PlanUtils.getDefaultTableDesc(String separatorCode, String columns, String columnTypes, boolean lastColumnTakesRestOfTheLine)` Generate the table descriptor of MetadataTypedColumnsetSerDe with the separatorCode, column names and column types (comma-separated strings), and whether the last column should take the rest of the line. |
| `static TableDesc` | `PlanUtils.getIntermediateFileTableDesc(List<FieldSchema> fieldSchemas)` Generate the table descriptor for intermediate files. |
| `TableDesc` | `MapredWork.getKeyDesc()` |
| `TableDesc` | `ReduceSinkDesc.getKeySerializeInfo()` |
| `TableDesc` | `JoinDesc.getKeyTableDesc()` |
| `TableDesc` | `HashTableSinkDesc.getKeyTableDesc()` |
| `TableDesc` | `HashTableSinkDesc.getKeyTblDesc()` |
| `TableDesc` | `MapJoinDesc.getKeyTblDesc()` |
| `static TableDesc` | `PlanUtils.getMapJoinKeyTableDesc(List<FieldSchema> fieldSchemas)` Generate the table descriptor for the map-side join key. |
| `static TableDesc` | `PlanUtils.getMapJoinValueTableDesc(List<FieldSchema> fieldSchemas)` Generate the table descriptor for the map-side join value. |
| `static TableDesc` | `PlanUtils.getReduceKeyTableDesc(List<FieldSchema> fieldSchemas, String order)` Generate the table descriptor for the reduce key. |
| `static TableDesc` | `PlanUtils.getReduceValueTableDesc(List<FieldSchema> fieldSchemas)` Generate the table descriptor for the reduce value, written to intermediate files. |
| `TableDesc` | `ScriptDesc.getScriptErrInfo()` |
| `TableDesc` | `ScriptDesc.getScriptInputInfo()` |
| `TableDesc` | `ScriptDesc.getScriptOutputInfo()` |
| `TableDesc` | `LoadTableDesc.getTable()` |
| `TableDesc` | `PartitionDesc.getTableDesc()` |
| `static TableDesc` | `PlanUtils.getTableDesc(Class<? extends Deserializer> serdeClass, String separatorCode, String columns)` Generate the table descriptor of the given serde with the separatorCode and column names (comma-separated string). |
| `static TableDesc` | `PlanUtils.getTableDesc(Class<? extends Deserializer> serdeClass, String separatorCode, String columns, boolean lastColumnTakesRestOfTheLine)` Generate the table descriptor of the specified serde with the separatorCode and column names (comma-separated string), and whether the last column should take the rest of the line. |
| `static TableDesc` | `PlanUtils.getTableDesc(Class<? extends Deserializer> serdeClass, String separatorCode, String columns, String columnTypes, boolean lastColumnTakesRestOfTheLine)` |
| `static TableDesc` | `PlanUtils.getTableDesc(Class<? extends Deserializer> serdeClass, String separatorCode, String columns, String columnTypes, boolean lastColumnTakesRestOfTheLine, boolean useDelimitedJSON)` |
| `static TableDesc` | `PlanUtils.getTableDesc(Class<? extends Deserializer> serdeClass, String separatorCode, String columns, String columnTypes, boolean lastColumnTakesRestOfTheLine, boolean useDelimitedJSON, String fileFormat)` |
| `static TableDesc` | `PlanUtils.getTableDesc(CreateTableDesc crtTblDesc, String cols, String colTypes)` Generate a table descriptor from a CreateTableDesc. |
| `TableDesc` | `FileSinkDesc.getTableInfo()` |
| `TableDesc` | `HashTableDummyDesc.getTbl()` |
| `TableDesc` | `FetchWork.getTblDesc()` |
| `TableDesc` | `ReduceSinkDesc.getValueSerializeInfo()` |
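
The `getDefaultTableDesc` family above builds text-style descriptors from a separator code and a column list. A minimal sketch; passing the separator as the numeric character code in string form (e.g. "1" for Ctrl-A) is an assumption drawn from how the overloads name the parameter:

```java
import org.apache.hadoop.hive.ql.plan.PlanUtils;
import org.apache.hadoop.hive.ql.plan.TableDesc;

public class DefaultTableDescSketch {
  public static void main(String[] args) {
    // MetadataTypedColumnsetSerDe descriptor: Ctrl-A separated, two
    // comma-separated column names, and the last column does not absorb
    // the rest of the line.
    TableDesc td =
        PlanUtils.getDefaultTableDesc(Integer.toString(1), "id,name", false);
    System.out.println(td.getProperties());
  }
}
```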

**Methods in org.apache.hadoop.hive.ql.plan that return types with arguments of type TableDesc**

| Modifier and Type | Method and Description |
|---|---|
| `Map<Byte,TableDesc>` | `JoinDesc.getSkewKeysValuesTables()` |
| `Map<Byte,TableDesc>` | `HashTableSinkDesc.getSkewKeysValuesTables()` |
| `List<TableDesc>` | `MapredWork.getTagToValueDesc()` |
| `List<TableDesc>` | `MapJoinDesc.getValueFilteredTblDescs()` |
| `List<TableDesc>` | `HashTableSinkDesc.getValueTblDescs()` |
| `List<TableDesc>` | `MapJoinDesc.getValueTblDescs()` |
| `List<TableDesc>` | `HashTableSinkDesc.getValueTblFilteredDescs()` |

**Methods in org.apache.hadoop.hive.ql.plan with parameters of type TableDesc**

| Modifier and Type | Method and Description |
|---|---|
| `static void` | `PlanUtils.configureInputJobPropertiesForStorageHandler(TableDesc tableDesc)` Loads the storage handler (if one exists) for the given table and invokes HiveStorageHandler.configureInputJobProperties(TableDesc, java.util.Map). |
| `static void` | `PlanUtils.configureOutputJobPropertiesForStorageHandler(TableDesc tableDesc)` Loads the storage handler (if one exists) for the given table and invokes HiveStorageHandler.configureOutputJobProperties(TableDesc, java.util.Map). |
| `void` | `MapredWork.resolveDynamicPartitionMerge(HiveConf conf, org.apache.hadoop.fs.Path path, TableDesc tblDesc, ArrayList<String> aliases, PartitionDesc partDesc)` |
| `void` | `MapredWork.setKeyDesc(TableDesc keyDesc)` |
| `void` | `ReduceSinkDesc.setKeySerializeInfo(TableDesc keySerializeInfo)` |
| `void` | `JoinDesc.setKeyTableDesc(TableDesc keyTblDesc)` |
| `void` | `HashTableSinkDesc.setKeyTableDesc(TableDesc keyTableDesc)` |
| `void` | `HashTableSinkDesc.setKeyTblDesc(TableDesc keyTblDesc)` |
| `void` | `MapJoinDesc.setKeyTblDesc(TableDesc keyTblDesc)` |
| `void` | `ScriptDesc.setScriptErrInfo(TableDesc scriptErrInfo)` |
| `void` | `ScriptDesc.setScriptInputInfo(TableDesc scriptInputInfo)` |
| `void` | `ScriptDesc.setScriptOutputInfo(TableDesc scriptOutputInfo)` |
| `void` | `LoadTableDesc.setTable(TableDesc table)` |
| `void` | `PartitionDesc.setTableDesc(TableDesc tableDesc)` |
| `void` | `FileSinkDesc.setTableInfo(TableDesc tableInfo)` |
| `void` | `HashTableDummyDesc.setTbl(TableDesc tbl)` |
| `void` | `FetchWork.setTblDesc(TableDesc tblDesc)` |
| `void` | `ReduceSinkDesc.setValueSerializeInfo(TableDesc valueSerializeInfo)` |
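
A short sketch of the two storage-handler configuration entry points; per the descriptions above, each first loads the table's storage handler, if one exists:

```java
import org.apache.hadoop.hive.ql.plan.PlanUtils;
import org.apache.hadoop.hive.ql.plan.TableDesc;

public class HandlerPropsSketch {
  // Let the table's storage handler (if any) contribute job properties
  // for the read and write sides before the plan is serialized.
  static void configure(TableDesc readSide, TableDesc writeSide) {
    PlanUtils.configureInputJobPropertiesForStorageHandler(readSide);
    PlanUtils.configureOutputJobPropertiesForStorageHandler(writeSide);
  }
}
```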

**Method parameters in org.apache.hadoop.hive.ql.plan with type arguments of type TableDesc**

| Modifier and Type | Method and Description |
|---|---|
| `void` | `JoinDesc.setSkewKeysValuesTables(Map<Byte,TableDesc> skewKeysValuesTables)` |
| `void` | `HashTableSinkDesc.setSkewKeysValuesTables(Map<Byte,TableDesc> skewKeysValuesTables)` |
| `void` | `MapredWork.setTagToValueDesc(List<TableDesc> tagToValueDesc)` |
| `void` | `MapJoinDesc.setValueFilteredTblDescs(List<TableDesc> valueFilteredTblDescs)` |
| `void` | `HashTableSinkDesc.setValueTblDescs(List<TableDesc> valueTblDescs)` |
| `void` | `MapJoinDesc.setValueTblDescs(List<TableDesc> valueTblDescs)` |
| `void` | `HashTableSinkDesc.setValueTblFilteredDescs(List<TableDesc> valueTblFilteredDescs)` |

**Constructors in org.apache.hadoop.hive.ql.plan with parameters of type TableDesc**

| Constructor |
|---|
| `FetchWork(List<String> partDir, List<PartitionDesc> partDesc, TableDesc tblDesc)` |
| `FetchWork(List<String> partDir, List<PartitionDesc> partDesc, TableDesc tblDesc, int limit)` |
| `FetchWork(String tblDir, TableDesc tblDesc)` |
| `FetchWork(String tblDir, TableDesc tblDesc, int limit)` |
| `FileSinkDesc(String dirName, TableDesc tableInfo, boolean compressed)` |
| `FileSinkDesc(String dirName, TableDesc tableInfo, boolean compressed, int destTableId, boolean multiFileSpray, int numFiles, int totalFiles, ArrayList<ExprNodeDesc> partitionCols, DynamicPartitionCtx dpCtx)` |
| `LoadTableDesc(String sourceDir, String tmpDir, TableDesc table, DynamicPartitionCtx dpCtx)` |
| `LoadTableDesc(String sourceDir, String tmpDir, TableDesc table, Map<String,String> partitionSpec)` |
| `LoadTableDesc(String sourceDir, String tmpDir, TableDesc table, Map<String,String> partitionSpec, boolean replace)` |
| `MapJoinDesc(Map<Byte,List<ExprNodeDesc>> keys, TableDesc keyTblDesc, Map<Byte,List<ExprNodeDesc>> values, List<TableDesc> valueTblDescs, List<TableDesc> valueFilteredTblDescs, List<String> outputColumnNames, int posBigTable, JoinCondDesc[] conds, Map<Byte,List<ExprNodeDesc>> filters, boolean noOuterJoin, String dumpFilePrefix)` |
| `MapredWork(String command, LinkedHashMap<String,ArrayList<String>> pathToAliases, LinkedHashMap<String,PartitionDesc> pathToPartitionInfo, LinkedHashMap<String,Operator<? extends OperatorDesc>> aliasToWork, TableDesc keyDesc, List<TableDesc> tagToValueDesc, Operator<?> reducer, Integer numReduceTasks, MapredLocalWork mapLocalWork, boolean hadoopSupportsSplittable)` |
| `PartitionDesc(Partition part, TableDesc tblDesc)` |
| `PartitionDesc(TableDesc table, LinkedHashMap<String,String> partSpec)` |
| `PartitionDesc(TableDesc table, LinkedHashMap<String,String> partSpec, Class<? extends Deserializer> serdeClass, Class<? extends org.apache.hadoop.mapred.InputFormat> inputFileFormatClass, Class<?> outputFormat, Properties properties, String serdeClassName)` |
| `ReduceSinkDesc(ArrayList<ExprNodeDesc> keyCols, int numDistributionKeys, ArrayList<ExprNodeDesc> valueCols, ArrayList<String> outputKeyColumnNames, List<List<Integer>> distinctColumnIndices, ArrayList<String> outputValueColumnNames, int tag, ArrayList<ExprNodeDesc> partitionCols, int numReducers, TableDesc keySerializeInfo, TableDesc valueSerializeInfo)` |
| `ScriptDesc(String scriptCmd, TableDesc scriptInputInfo, Class<? extends RecordWriter> inRecordWriterClass, TableDesc scriptOutputInfo, Class<? extends RecordReader> outRecordReaderClass, Class<? extends RecordReader> errRecordReaderClass, TableDesc scriptErrInfo)` |
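
For illustration, the simplest `FetchWork` constructors above wire a single table directory to its `TableDesc`, optionally with a row limit. The path here is hypothetical:

```java
import org.apache.hadoop.hive.ql.plan.FetchWork;
import org.apache.hadoop.hive.ql.plan.TableDesc;

public class FetchWorkSketch {
  // Single-directory fetch over at most ten rows, using the
  // FetchWork(String, TableDesc, int) constructor listed above.
  static FetchWork limitedFetch(TableDesc tblDesc) {
    return new FetchWork("/warehouse/example_table", tblDesc, 10);
  }
}
```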

**Constructor parameters in org.apache.hadoop.hive.ql.plan with type arguments of type TableDesc**

| Constructor |
|---|
| `MapJoinDesc(Map<Byte,List<ExprNodeDesc>> keys, TableDesc keyTblDesc, Map<Byte,List<ExprNodeDesc>> values, List<TableDesc> valueTblDescs, List<TableDesc> valueFilteredTblDescs, List<String> outputColumnNames, int posBigTable, JoinCondDesc[] conds, Map<Byte,List<ExprNodeDesc>> filters, boolean noOuterJoin, String dumpFilePrefix)` |
| `MapredWork(String command, LinkedHashMap<String,ArrayList<String>> pathToAliases, LinkedHashMap<String,PartitionDesc> pathToPartitionInfo, LinkedHashMap<String,Operator<? extends OperatorDesc>> aliasToWork, TableDesc keyDesc, List<TableDesc> tagToValueDesc, Operator<?> reducer, Integer numReduceTasks, MapredLocalWork mapLocalWork, boolean hadoopSupportsSplittable)` |