org.apache.hadoop.hive.ql.plan
Class MapredWork
java.lang.Object
org.apache.hadoop.hive.ql.plan.AbstractOperatorDesc
org.apache.hadoop.hive.ql.plan.MapredWork
- All Implemented Interfaces:
- Serializable, Cloneable, OperatorDesc
- Direct Known Subclasses:
- MergeWork
public class MapredWork
- extends AbstractOperatorDesc
MapredWork describes the plan for a single map-reduce job: the map side as mappings from input paths to aliases, partition descriptors, and per-alias operator trees, and the reduce side as a reducer operator together with its key and value descriptors and reducer count.
- See Also:
- Serialized Form
Constructor Summary

MapredWork()

MapredWork(String command,
           LinkedHashMap<String,ArrayList<String>> pathToAliases,
           LinkedHashMap<String,PartitionDesc> pathToPartitionInfo,
           LinkedHashMap<String,Operator<? extends OperatorDesc>> aliasToWork,
           TableDesc keyDesc,
           List<TableDesc> tagToValueDesc,
           Operator<?> reducer,
           Integer numReduceTasks,
           MapredLocalWork mapLocalWork,
           boolean hadoopSupportsSplittable)
Method Summary

void addIndexIntermediateFile(String fileName)
void addMapWork(String path, String alias, Operator<?> work, PartitionDesc pd)
void deriveExplainAttributes()
    Derive additional attributes to be rendered by EXPLAIN.
LinkedHashMap<String,PartitionDesc> getAliasToPartnInfo()
LinkedHashMap<String,Operator<? extends OperatorDesc>> getAliasToWork()
List<Operator<?>> getAllOperators()
String getCommand()
boolean getHadoopSupportsSplittable()
String getIndexIntermediateFile()
String getInputformat()
QBJoinTree getJoinTree()
TableDesc getKeyDesc()
MapredLocalWork getMapLocalWork()
Long getMaxSplitSize()
Long getMinSplitSize()
Long getMinSplitSizePerNode()
Long getMinSplitSizePerRack()
HashMap<String,SplitSample> getNameToSplitSample()
boolean getNeedsTagging()
Integer getNumMapTasks()
Integer getNumReduceTasks()
    If the number of reducers is -1, the runtime will automatically figure it out from the input data size.
LinkedHashMap<Operator<? extends OperatorDesc>,OpParseContext> getOpParseCtxMap()
LinkedHashMap<String,ArrayList<String>> getPathToAliases()
LinkedHashMap<String,PartitionDesc> getPathToPartitionInfo()
Operator<?> getReducer()
List<TableDesc> getTagToValueDesc()
String getTmpHDFSFileURI()
Map<String,ArrayList<String>> getTruncatedPathToAliases()
void initialize()
boolean isGatheringStats()
boolean isInputFormatSorted()
String isInvalid()
boolean isMapperCannotSpanPartns()
boolean isUseBucketizedHiveInputFormat()
void resolveDynamicPartitionMerge(HiveConf conf, org.apache.hadoop.fs.Path path, TableDesc tblDesc, ArrayList<String> aliases, PartitionDesc partDesc)
void setAliasToPartnInfo(LinkedHashMap<String,PartitionDesc> aliasToPartnInfo)
void setAliasToWork(LinkedHashMap<String,Operator<? extends OperatorDesc>> aliasToWork)
void setCommand(String command)
void setGatheringStats(boolean gatherStats)
void setHadoopSupportsSplittable(boolean hadoopSupportsSplittable)
void setInputformat(String inputformat)
void setInputFormatSorted(boolean inputFormatSorted)
void setJoinTree(QBJoinTree joinTree)
void setKeyDesc(TableDesc keyDesc)
void setMapLocalWork(MapredLocalWork mapLocalWork)
void setMapperCannotSpanPartns(boolean mapperCannotSpanPartns)
void setMaxSplitSize(Long maxSplitSize)
void setMinSplitSize(Long minSplitSize)
void setMinSplitSizePerNode(Long minSplitSizePerNode)
void setMinSplitSizePerRack(Long minSplitSizePerRack)
void setNameToSplitSample(HashMap<String,SplitSample> nameToSplitSample)
void setNeedsTagging(boolean needsTagging)
void setNumMapTasks(Integer numMapTasks)
void setNumReduceTasks(Integer numReduceTasks)
void setOpParseCtxMap(LinkedHashMap<Operator<? extends OperatorDesc>,OpParseContext> opParseCtxMap)
void setPathToAliases(LinkedHashMap<String,ArrayList<String>> pathToAliases)
void setPathToPartitionInfo(LinkedHashMap<String,PartitionDesc> pathToPartitionInfo)
void setReducer(Operator<?> reducer)
void setTagToValueDesc(List<TableDesc> tagToValueDesc)
void setTmpHDFSFileURI(String tmpHDFSFileURI)
void setUseBucketizedHiveInputFormat(boolean useBucketizedHiveInputFormat)
String toXML()
MapredWork
public MapredWork()
MapredWork
public MapredWork(String command,
LinkedHashMap<String,ArrayList<String>> pathToAliases,
LinkedHashMap<String,PartitionDesc> pathToPartitionInfo,
LinkedHashMap<String,Operator<? extends OperatorDesc>> aliasToWork,
TableDesc keyDesc,
List<TableDesc> tagToValueDesc,
Operator<?> reducer,
Integer numReduceTasks,
MapredLocalWork mapLocalWork,
boolean hadoopSupportsSplittable)
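As a rough sketch of how the constructor arguments fit together, the example below assembles a single-input plan by hand. The scan operator, descriptors, reducer, path, and alias are hypothetical placeholders; in Hive these objects come from the query compiler, not hand-written code.

    import java.util.ArrayList;
    import java.util.LinkedHashMap;
    import java.util.List;

    import org.apache.hadoop.hive.ql.exec.Operator;
    import org.apache.hadoop.hive.ql.plan.MapredWork;
    import org.apache.hadoop.hive.ql.plan.OperatorDesc;
    import org.apache.hadoop.hive.ql.plan.PartitionDesc;
    import org.apache.hadoop.hive.ql.plan.TableDesc;

    public class MapredWorkSketch {
      // scanOp, partDesc, keyDesc, valueDesc, and reducer stand in for
      // objects that the Hive compiler would normally produce.
      static MapredWork buildSingleInputPlan(Operator<? extends OperatorDesc> scanOp,
                                             PartitionDesc partDesc,
                                             TableDesc keyDesc,
                                             TableDesc valueDesc,
                                             Operator<?> reducer) {
        String path = "hdfs://namenode:8020/user/hive/warehouse/src"; // hypothetical

        // Map side: input path -> aliases, path -> partition info,
        // alias -> root of the map-side operator tree.
        LinkedHashMap<String, ArrayList<String>> pathToAliases =
            new LinkedHashMap<String, ArrayList<String>>();
        ArrayList<String> aliases = new ArrayList<String>();
        aliases.add("src");
        pathToAliases.put(path, aliases);

        LinkedHashMap<String, PartitionDesc> pathToPartitionInfo =
            new LinkedHashMap<String, PartitionDesc>();
        pathToPartitionInfo.put(path, partDesc);

        LinkedHashMap<String, Operator<? extends OperatorDesc>> aliasToWork =
            new LinkedHashMap<String, Operator<? extends OperatorDesc>>();
        aliasToWork.put("src", scanOp);

        // Reduce side: one value descriptor per input tag; a single input uses tag 0.
        List<TableDesc> tagToValueDesc = new ArrayList<TableDesc>();
        tagToValueDesc.add(valueDesc);

        return new MapredWork("EXAMPLE",           // command string (hypothetical)
                              pathToAliases,
                              pathToPartitionInfo,
                              aliasToWork,
                              keyDesc,
                              tagToValueDesc,
                              reducer,
                              Integer.valueOf(-1), // -1: runtime derives the reducer count
                              null,                // no map-local work
                              true);               // hadoopSupportsSplittable
      }
    }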
getCommand
public String getCommand()
setCommand
public void setCommand(String command)
getPathToAliases
public LinkedHashMap<String,ArrayList<String>> getPathToAliases()
setPathToAliases
public void setPathToAliases(LinkedHashMap<String,ArrayList<String>> pathToAliases)
getTruncatedPathToAliases
public Map<String,ArrayList<String>> getTruncatedPathToAliases()
getPathToPartitionInfo
public LinkedHashMap<String,PartitionDesc> getPathToPartitionInfo()
setPathToPartitionInfo
public void setPathToPartitionInfo(LinkedHashMap<String,PartitionDesc> pathToPartitionInfo)
getAliasToPartnInfo
public LinkedHashMap<String,PartitionDesc> getAliasToPartnInfo()
- Returns:
- the aliasToPartnInfo
setAliasToPartnInfo
public void setAliasToPartnInfo(LinkedHashMap<String,PartitionDesc> aliasToPartnInfo)
- Parameters:
- aliasToPartnInfo - the aliasToPartnInfo to set
getAliasToWork
public LinkedHashMap<String,Operator<? extends OperatorDesc>> getAliasToWork()
setAliasToWork
public void setAliasToWork(LinkedHashMap<String,Operator<? extends OperatorDesc>> aliasToWork)
getMapLocalWork
public MapredLocalWork getMapLocalWork()
- Returns:
- the mapredLocalWork
setMapLocalWork
public void setMapLocalWork(MapredLocalWork mapLocalWork)
- Parameters:
- mapLocalWork - the mapredLocalWork to set
getKeyDesc
public TableDesc getKeyDesc()
setKeyDesc
public void setKeyDesc(TableDesc keyDesc)
getTagToValueDesc
public List<TableDesc> getTagToValueDesc()
setTagToValueDesc
public void setTagToValueDesc(List<TableDesc> tagToValueDesc)
getReducer
public Operator<?> getReducer()
getNameToSplitSample
public HashMap<String,SplitSample> getNameToSplitSample()
setNameToSplitSample
public void setNameToSplitSample(HashMap<String,SplitSample> nameToSplitSample)
setReducer
public void setReducer(Operator<?> reducer)
getNumMapTasks
public Integer getNumMapTasks()
setNumMapTasks
public void setNumMapTasks(Integer numMapTasks)
getNumReduceTasks
public Integer getNumReduceTasks()
- If the number of reducers is -1, the runtime will automatically figure it
out from the input data size.
The number of reducers will be a positive number only in case the target
table is bucketed into N buckets (through CREATE TABLE). This feature is
not supported yet, so the number of reducers will always be -1 for now.
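Callers can rely on that convention directly: -1 means "derive at runtime from the input size", and a positive value is an explicit count. A minimal check, assuming work is an existing MapredWork:

    Integer numReducers = work.getNumReduceTasks();
    if (numReducers != null && numReducers.intValue() == -1) {
      System.out.println("reducer count derived from input size at runtime");
    } else {
      System.out.println("explicit reducer count: " + numReducers);
    }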
setNumReduceTasks
public void setNumReduceTasks(Integer numReduceTasks)
addMapWork
public void addMapWork(String path,
String alias,
Operator<?> work,
PartitionDesc pd)
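addMapWork looks like the incremental alternative to populating the map-side maps through the constructor, registering one (path, alias, operator tree, partition descriptor) combination at a time. A sketch, again with placeholder objects (work, scanOp, partDesc) and a hypothetical path:

    work.addMapWork("hdfs://namenode:8020/user/hive/warehouse/src/ds=2011-01-01", // hypothetical
                    "src",     // table alias reading from the path
                    scanOp,    // map-side operator tree for this alias
                    partDesc); // partition descriptor for the path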
isInvalid
public String isInvalid()
toXML
public String toXML()
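Despite the boolean-sounding name, isInvalid() returns a String: presumably a human-readable reason when the plan is inconsistent, and null when it passes validation. Combined with toXML(), that suggests a simple debugging idiom (work is an existing MapredWork):

    String problem = work.isInvalid();
    if (problem != null) {
      System.err.println("plan is invalid: " + problem);
    } else {
      // toXML() serializes the plan, which is handy for diffing or offline review.
      System.out.println(work.toXML());
    }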
deriveExplainAttributes
public void deriveExplainAttributes()
- Derive additional attributes to be rendered by EXPLAIN.
initialize
public void initialize()
getNeedsTagging
public boolean getNeedsTagging()
setNeedsTagging
public void setNeedsTagging(boolean needsTagging)
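Tagging matters when the reduce side has more than one input, as in a join: each row carries the tag of the input it came from, and the tag indexes into tagToValueDesc. The wiring below follows that reading of the API; the two descriptors are placeholders:

    // Two reduce-side inputs (e.g. the two sides of a join): one value
    // descriptor per tag, with tagging enabled so rows identify their origin.
    List<TableDesc> tagToValueDesc = new ArrayList<TableDesc>();
    tagToValueDesc.add(leftValueDesc);  // tag 0 (placeholder)
    tagToValueDesc.add(rightValueDesc); // tag 1 (placeholder)
    work.setTagToValueDesc(tagToValueDesc);
    work.setNeedsTagging(true);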
getHadoopSupportsSplittable
public boolean getHadoopSupportsSplittable()
setHadoopSupportsSplittable
public void setHadoopSupportsSplittable(boolean hadoopSupportsSplittable)
getMaxSplitSize
public Long getMaxSplitSize()
setMaxSplitSize
public void setMaxSplitSize(Long maxSplitSize)
getMinSplitSize
public Long getMinSplitSize()
setMinSplitSize
public void setMinSplitSize(Long minSplitSize)
getMinSplitSizePerNode
public Long getMinSplitSizePerNode()
setMinSplitSizePerNode
public void setMinSplitSizePerNode(Long minSplitSizePerNode)
getMinSplitSizePerRack
public Long getMinSplitSizePerRack()
setMinSplitSizePerRack
public void setMinSplitSizePerRack(Long minSplitSizePerRack)
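These four knobs appear to mirror Hadoop's combine-split settings: a cap on split size plus minimum sizes overall, per node, and per rack, which together control how small input files are grouped into a single mapper. An illustrative configuration (the byte values are arbitrary, not recommendations):

    // Cap splits at 256 MB; require at least 64 MB before emitting a
    // node- or rack-local split.
    work.setMaxSplitSize(Long.valueOf(256L * 1024 * 1024));
    work.setMinSplitSize(Long.valueOf(64L * 1024 * 1024));
    work.setMinSplitSizePerNode(Long.valueOf(64L * 1024 * 1024));
    work.setMinSplitSizePerRack(Long.valueOf(64L * 1024 * 1024));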
getInputformat
public String getInputformat()
setInputformat
public void setInputformat(String inputformat)
getIndexIntermediateFile
public String getIndexIntermediateFile()
addIndexIntermediateFile
public void addIndexIntermediateFile(String fileName)
setGatheringStats
public void setGatheringStats(boolean gatherStats)
isGatheringStats
public boolean isGatheringStats()
setMapperCannotSpanPartns
public void setMapperCannotSpanPartns(boolean mapperCannotSpanPartns)
isMapperCannotSpanPartns
public boolean isMapperCannotSpanPartns()
getTmpHDFSFileURI
public String getTmpHDFSFileURI()
setTmpHDFSFileURI
public void setTmpHDFSFileURI(String tmpHDFSFileURI)
getJoinTree
public QBJoinTree getJoinTree()
setJoinTree
public void setJoinTree(QBJoinTree joinTree)
getOpParseCtxMap
public LinkedHashMap<Operator<? extends OperatorDesc>,OpParseContext> getOpParseCtxMap()
setOpParseCtxMap
public void setOpParseCtxMap(LinkedHashMap<Operator<? extends OperatorDesc>,OpParseContext> opParseCtxMap)
isInputFormatSorted
public boolean isInputFormatSorted()
setInputFormatSorted
public void setInputFormatSorted(boolean inputFormatSorted)
resolveDynamicPartitionMerge
public void resolveDynamicPartitionMerge(HiveConf conf,
org.apache.hadoop.fs.Path path,
TableDesc tblDesc,
ArrayList<String> aliases,
PartitionDesc partDesc)
getAllOperators
public List<Operator<?>> getAllOperators()
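getAllOperators flattens the whole plan into a single list, which makes plan-wide passes easy to write. A small traversal, assuming work is an existing MapredWork:

    List<Operator<?>> ops = work.getAllOperators();
    for (Operator<?> op : ops) {
      // getClass().getSimpleName() avoids assuming anything about Operator's own naming API.
      System.out.println(op.getClass().getSimpleName());
    }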
isUseBucketizedHiveInputFormat
public boolean isUseBucketizedHiveInputFormat()
setUseBucketizedHiveInputFormat
public void setUseBucketizedHiveInputFormat(boolean useBucketizedHiveInputFormat)
Copyright © 2011 The Apache Software Foundation