org.apache.hadoop.hive.ql.plan
Class MapredWork
java.lang.Object
org.apache.hadoop.hive.ql.plan.MapredWork
- All Implemented Interfaces:
- Serializable

public class MapredWork
extends Object
implements Serializable
MapredWork describes the work that makes up a single MapReduce task in a Hive query plan: the map-side mapping from input paths to table aliases and operator trees, the optional reduce-side operator tree, and the key/value descriptors used to shuffle rows between the two phases.
- See Also:
- Serialized Form
Constructor Summary

MapredWork()

MapredWork(String command,
           LinkedHashMap<String,ArrayList<String>> pathToAliases,
           LinkedHashMap<String,PartitionDesc> pathToPartitionInfo,
           LinkedHashMap<String,Operator<? extends Serializable>> aliasToWork,
           TableDesc keyDesc,
           List<TableDesc> tagToValueDesc,
           Operator<?> reducer,
           Integer numReduceTasks,
           MapredLocalWork mapLocalWork,
           boolean hadoopSupportsSplittable)

Method Summary

void addMapWork(String path, String alias, Operator<?> work, PartitionDesc pd)
void deriveExplainAttributes()
    Derive additional attributes to be rendered by EXPLAIN.
LinkedHashMap<String,PartitionDesc> getAliasToPartnInfo()
LinkedHashMap<String,Operator<? extends Serializable>> getAliasToWork()
String getCommand()
boolean getHadoopSupportsSplittable()
String getInputformat()
QBJoinTree getJoinTree()
TableDesc getKeyDesc()
MapredLocalWork getMapLocalWork()
Long getMinSplitSize()
boolean getNeedsTagging()
Integer getNumMapTasks()
Integer getNumReduceTasks()
    If the number of reducers is -1, the runtime will automatically figure it out from the input data size.
LinkedHashMap<Operator<? extends Serializable>,OpParseContext> getOpParseCtxMap()
LinkedHashMap<String,ArrayList<String>> getPathToAliases()
LinkedHashMap<String,PartitionDesc> getPathToPartitionInfo()
Operator<?> getReducer()
List<TableDesc> getTagToValueDesc()
String getTmpHDFSFileURI()
void initialize()
boolean isGatheringStats()
String isInvalid()
boolean isMapperCannotSpanPartns()
void setAliasToPartnInfo(LinkedHashMap<String,PartitionDesc> aliasToPartnInfo)
void setAliasToWork(LinkedHashMap<String,Operator<? extends Serializable>> aliasToWork)
void setCommand(String command)
void setGatheringStats(boolean gatherStats)
void setHadoopSupportsSplittable(boolean hadoopSupportsSplittable)
void setInputformat(String inputformat)
void setJoinTree(QBJoinTree joinTree)
void setKeyDesc(TableDesc keyDesc)
void setMapLocalWork(MapredLocalWork mapLocalWork)
void setMapperCannotSpanPartns(boolean mapperCannotSpanPartns)
void setMinSplitSize(Long minSplitSize)
void setNeedsTagging(boolean needsTagging)
void setNumMapTasks(Integer numMapTasks)
void setNumReduceTasks(Integer numReduceTasks)
void setOpParseCtxMap(LinkedHashMap<Operator<? extends Serializable>,OpParseContext> opParseCtxMap)
void setPathToAliases(LinkedHashMap<String,ArrayList<String>> pathToAliases)
void setPathToPartitionInfo(LinkedHashMap<String,PartitionDesc> pathToPartitionInfo)
void setReducer(Operator<?> reducer)
void setTagToValueDesc(List<TableDesc> tagToValueDesc)
void setTmpHDFSFileURI(String tmpHDFSFileURI)
String toXML()

Methods inherited from class java.lang.Object

clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
MapredWork
public MapredWork()
MapredWork
public MapredWork(String command,
LinkedHashMap<String,ArrayList<String>> pathToAliases,
LinkedHashMap<String,PartitionDesc> pathToPartitionInfo,
LinkedHashMap<String,Operator<? extends Serializable>> aliasToWork,
TableDesc keyDesc,
List<TableDesc> tagToValueDesc,
Operator<?> reducer,
Integer numReduceTasks,
MapredLocalWork mapLocalWork,
boolean hadoopSupportsSplittable)
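Taken together, the constructor parameters mirror the plan's core state, and the no-arg constructor plus the setters below can assemble the same state step by step. The following is a minimal sketch, assuming a 2011-era hive-exec jar on the classpath; the Operator and PartitionDesc arguments are placeholders for objects the planner would normally build, and mapOnlyWork is a hypothetical helper, not part of this API:

    import java.io.Serializable;
    import java.util.ArrayList;
    import java.util.LinkedHashMap;

    import org.apache.hadoop.hive.ql.exec.Operator;
    import org.apache.hadoop.hive.ql.plan.MapredWork;
    import org.apache.hadoop.hive.ql.plan.PartitionDesc;

    public class MapredWorkSketch {
      // Assembles a map-only MapredWork for a single input: one path, one
      // alias, one root operator, one partition descriptor.
      public static MapredWork mapOnlyWork(String path, String alias,
          Operator<? extends Serializable> root, PartitionDesc pd) {
        MapredWork work = new MapredWork();

        // path -> list of table aliases read from that path
        LinkedHashMap<String, ArrayList<String>> pathToAliases =
            new LinkedHashMap<String, ArrayList<String>>();
        ArrayList<String> aliases = new ArrayList<String>();
        aliases.add(alias);
        pathToAliases.put(path, aliases);
        work.setPathToAliases(pathToAliases);

        // path -> partition metadata for that path
        LinkedHashMap<String, PartitionDesc> pathToPartitionInfo =
            new LinkedHashMap<String, PartitionDesc>();
        pathToPartitionInfo.put(path, pd);
        work.setPathToPartitionInfo(pathToPartitionInfo);

        // alias -> root of the map-side operator tree for that alias
        LinkedHashMap<String, Operator<? extends Serializable>> aliasToWork =
            new LinkedHashMap<String, Operator<? extends Serializable>>();
        aliasToWork.put(alias, root);
        work.setAliasToWork(aliasToWork);

        // No reducer is set, so this plan is map-only (assumption: a
        // map-only plan runs with zero reduce tasks).
        work.setNumReduceTasks(Integer.valueOf(0));
        return work;
      }
    }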
getCommand
public String getCommand()
setCommand
public void setCommand(String command)
getPathToAliases
public LinkedHashMap<String,ArrayList<String>> getPathToAliases()
setPathToAliases
public void setPathToAliases(LinkedHashMap<String,ArrayList<String>> pathToAliases)
getPathToPartitionInfo
public LinkedHashMap<String,PartitionDesc> getPathToPartitionInfo()
setPathToPartitionInfo
public void setPathToPartitionInfo(LinkedHashMap<String,PartitionDesc> pathToPartitionInfo)
getAliasToPartnInfo
public LinkedHashMap<String,PartitionDesc> getAliasToPartnInfo()
- Returns:
- the aliasToPartnInfo
setAliasToPartnInfo
public void setAliasToPartnInfo(LinkedHashMap<String,PartitionDesc> aliasToPartnInfo)
- Parameters:
- aliasToPartnInfo - the aliasToPartnInfo to set
getAliasToWork
public LinkedHashMap<String,Operator<? extends Serializable>> getAliasToWork()
setAliasToWork
public void setAliasToWork(LinkedHashMap<String,Operator<? extends Serializable>> aliasToWork)
getMapLocalWork
public MapredLocalWork getMapLocalWork()
- Returns:
- the mapredLocalWork
setMapLocalWork
public void setMapLocalWork(MapredLocalWork mapLocalWork)
- Parameters:
- mapLocalWork - the mapredLocalWork to set
getKeyDesc
public TableDesc getKeyDesc()
setKeyDesc
public void setKeyDesc(TableDesc keyDesc)
getTagToValueDesc
public List<TableDesc> getTagToValueDesc()
setTagToValueDesc
public void setTagToValueDesc(List<TableDesc> tagToValueDesc)
getReducer
public Operator<?> getReducer()
setReducer
public void setReducer(Operator<?> reducer)
getNumMapTasks
public Integer getNumMapTasks()
setNumMapTasks
public void setNumMapTasks(Integer numMapTasks)
getNumReduceTasks
public Integer getNumReduceTasks()
- If the number of reducers is -1, the runtime will automatically figure it
out from the input data size.
The number of reducers will be a positive number only when the target table
is bucketed into N buckets (through CREATE TABLE). This feature is not
supported yet, so for now the number of reducers is always -1.
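A hedged illustration of the -1 convention, continuing the work variable from the sketch under the constructor:

    // -1 delegates the choice of reducer count to the runtime, which
    // estimates it from the input data size.
    work.setNumReduceTasks(Integer.valueOf(-1));

    Integer reducers = work.getNumReduceTasks();
    if (reducers != null && reducers.intValue() == -1) {
      // The execution engine substitutes its own estimate at run time.
    }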
setNumReduceTasks
public void setNumReduceTasks(Integer numReduceTasks)
addMapWork
public void addMapWork(String path,
String alias,
Operator<?> work,
PartitionDesc pd)
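Judging from its parameter list, addMapWork registers a new input's alias, operator tree, and partition descriptor in one call rather than through the individual setters. A hedged usage sketch, reusing the work variable from the constructor example; src2Root and src2Part are hypothetical stand-ins for planner-built objects:

    // Add a second input path and its alias/operator/partition bindings.
    work.addMapWork("/warehouse/src2", "src2", src2Root, src2Part);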
isInvalid
public String isInvalid()
toXML
public String toXML()
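Since toXML() returns the serialized plan as a String, it is handy for inspecting a plan while debugging; a one-line sketch:

    // Dump the whole plan as XML for inspection.
    System.out.println(work.toXML());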
deriveExplainAttributes
public void deriveExplainAttributes()
- Derive additional attributes to be rendered by EXPLAIN.
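Presumably invoked once the plan is assembled and before EXPLAIN output is rendered, e.g.:

    // Populate derived attributes before rendering EXPLAIN output
    // (the caller and exact timing here are assumptions).
    work.deriveExplainAttributes();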
initialize
public void initialize()
getNeedsTagging
public boolean getNeedsTagging()
setNeedsTagging
public void setNeedsTagging(boolean needsTagging)
getHadoopSupportsSplittable
public boolean getHadoopSupportsSplittable()
setHadoopSupportsSplittable
public void setHadoopSupportsSplittable(boolean hadoopSupportsSplittable)
getMinSplitSize
public Long getMinSplitSize()
setMinSplitSize
public void setMinSplitSize(Long minSplitSize)
getInputformat
public String getInputformat()
setInputformat
public void setInputformat(String inputformat)
setGatheringStats
public void setGatheringStats(boolean gatherStats)
isGatheringStats
public boolean isGatheringStats()
setMapperCannotSpanPartns
public void setMapperCannotSpanPartns(boolean mapperCannotSpanPartns)
isMapperCannotSpanPartns
public boolean isMapperCannotSpanPartns()
getTmpHDFSFileURI
public String getTmpHDFSFileURI()
setTmpHDFSFileURI
public void setTmpHDFSFileURI(String tmpHDFSFileURI)
getJoinTree
public QBJoinTree getJoinTree()
setJoinTree
public void setJoinTree(QBJoinTree joinTree)
getOpParseCtxMap
public LinkedHashMap<Operator<? extends Serializable>,OpParseContext> getOpParseCtxMap()
setOpParseCtxMap
public void setOpParseCtxMap(LinkedHashMap<Operator<? extends Serializable>,OpParseContext> opParseCtxMap)
Copyright © 2011 The Apache Software Foundation