Uses of HiveException in org.apache.hadoop.hive.ql.exec
---

| Subclasses of HiveException in org.apache.hadoop.hive.ql.exec | |
|---|---|
| class | AmbiguousMethodException: Exception thrown by the UDF and UDAF method resolvers when a unique method is not found. |
| class | UDFArgumentException: Exception thrown when a UDF argument is invalid. |
| class | UDFArgumentLengthException: Exception thrown when UDF arguments have the wrong length. |
| class | UDFArgumentTypeException: Exception thrown when UDF arguments have the wrong types. |
| Methods in org.apache.hadoop.hive.ql.exec that throw HiveException | |
|---|---|
| protected void | CommonJoinOperator.checkAndGenObject() |
| void | FetchOperator.clearFetchContext(): Clear the context, if anything needs to be done. |
| void | ScriptOperator.close(boolean abort) |
| void | Operator.close(boolean abort) |
| void | FileSinkOperator.closeOp(boolean abort) |
| protected void | UDTFOperator.closeOp(boolean abort) |
| void | MapJoinOperator.closeOp(boolean abort) |
| void | GroupByOperator.closeOp(boolean abort): Forward all the aggregations to the children. |
| void | MapOperator.closeOp(boolean abort): Close extra child operators that are initialized but not executed. |
| void | CommonJoinOperator.closeOp(boolean abort): All done. |
| protected void | Operator.closeOp(boolean abort): Operator-specific close routine. |
| protected static ArrayList<Object> | CommonJoinOperator.computeValues(Object row, List<ExprNodeEvaluator> valueFields, List<ObjectInspector> valueFieldsOI): Return the value as a standard object. |
| void | GroupByOperator.endGroup() |
| void | CommonJoinOperator.endGroup(): Forward a record of join results. |
| void | Operator.endGroup() |
| Object | ExprNodeNullEvaluator.evaluate(Object row) |
| Object | ExprNodeGenericFuncEvaluator.evaluate(Object row) |
| Object | ExprNodeFieldEvaluator.evaluate(Object row) |
| Object | ExprNodeConstantEvaluator.evaluate(Object row) |
| Object | ExprNodeColumnEvaluator.evaluate(Object row) |
| abstract Object | ExprNodeEvaluator.evaluate(Object row): Evaluate the expression given the row. |
| protected void | GroupByOperator.forward(ArrayList<Object> keys, GenericUDAFEvaluator.AggregationBuffer[] aggs): Forward a record of keys and aggregation results. |
| protected void | Operator.forward(Object row, ObjectInspector rowInspector) |
| void | UDTFOperator.forwardUDTFOutput(Object o): Typically called indirectly by the GenericUDTF once it has generated output rows that should be passed on to the next operator(s) in the DAG. |
| protected static HashMap<Byte,List<ObjectInspector>> | CommonJoinOperator.getObjectInspectorsFromEvaluators(Map<Byte,List<ExprNodeEvaluator>> exprEntries, ObjectInspector[] inputObjInspector) |
| ObjectInspector | FetchOperator.getOutputObjectInspector() |
| static partitionDesc | Utilities.getPartitionDesc(Partition part) |
| static FileSinkOperator.RecordWriter | FileSinkOperator.getRecordWriter(org.apache.hadoop.mapred.JobConf jc, HiveOutputFormat<?,?> hiveOutputFormat, Class<? extends org.apache.hadoop.io.Writable> valueClass, boolean isCompressed, Properties tableProp, org.apache.hadoop.fs.Path outPath) |
| protected static ObjectInspector[] | Operator.initEvaluators(ExprNodeEvaluator[] evals, ObjectInspector rowInspector): Initialize an array of ExprNodeEvaluator and return the result ObjectInspectors. |
| protected static StructObjectInspector | Operator.initEvaluatorsAndReturnStruct(ExprNodeEvaluator[] evals, List<String> outputColName, ObjectInspector rowInspector): Initialize an array of ExprNodeEvaluator and put the return values into a StructObjectInspector with integer field names. |
| void | Operator.initialize(org.apache.hadoop.conf.Configuration hconf, ObjectInspector[] inputOIs): Initializes the operator only if all parents have been initialized. |
| ObjectInspector | ExprNodeNullEvaluator.initialize(ObjectInspector rowInspector) |
| ObjectInspector | ExprNodeGenericFuncEvaluator.initialize(ObjectInspector rowInspector) |
| ObjectInspector | ExprNodeFieldEvaluator.initialize(ObjectInspector rowInspector) |
| ObjectInspector | ExprNodeConstantEvaluator.initialize(ObjectInspector rowInspector) |
| ObjectInspector | ExprNodeColumnEvaluator.initialize(ObjectInspector rowInspector) |
| abstract ObjectInspector | ExprNodeEvaluator.initialize(ObjectInspector rowInspector): Initialize should be called once and only once. |
| void | MapOperator.initializeAsRoot(org.apache.hadoop.conf.Configuration hconf, mapredWork mrwork): Initializes this map operator as the root of the operator tree. |
| protected void | Operator.initializeChildren(org.apache.hadoop.conf.Configuration hconf): Calls initialize on each of the children with outputObjectInspector as the output row format. |
| protected void | FileSinkOperator.initializeOp(org.apache.hadoop.conf.Configuration hconf) |
| protected void | UnionOperator.initializeOp(org.apache.hadoop.conf.Configuration hconf): UnionOperator will transform the input rows if the inputObjInspectors from different parents are different. |
| protected void | UDTFOperator.initializeOp(org.apache.hadoop.conf.Configuration hconf) |
| protected void | SelectOperator.initializeOp(org.apache.hadoop.conf.Configuration hconf) |
| protected void | ScriptOperator.initializeOp(org.apache.hadoop.conf.Configuration hconf) |
| protected void | ReduceSinkOperator.initializeOp(org.apache.hadoop.conf.Configuration hconf) |
| protected void | MapJoinOperator.initializeOp(org.apache.hadoop.conf.Configuration hconf) |
| protected void | LimitOperator.initializeOp(org.apache.hadoop.conf.Configuration hconf) |
| protected void | LateralViewJoinOperator.initializeOp(org.apache.hadoop.conf.Configuration hconf) |
| protected void | JoinOperator.initializeOp(org.apache.hadoop.conf.Configuration hconf) |
| protected void | GroupByOperator.initializeOp(org.apache.hadoop.conf.Configuration hconf) |
| protected void | FilterOperator.initializeOp(org.apache.hadoop.conf.Configuration hconf) |
| protected void | ExtractOperator.initializeOp(org.apache.hadoop.conf.Configuration hconf) |
| void | MapOperator.initializeOp(org.apache.hadoop.conf.Configuration hconf) |
| protected void | CommonJoinOperator.initializeOp(org.apache.hadoop.conf.Configuration hconf) |
| protected void | CollectOperator.initializeOp(org.apache.hadoop.conf.Configuration hconf) |
| protected void | Operator.initializeOp(org.apache.hadoop.conf.Configuration hconf): Operator-specific initialization. |
| static Object | FunctionRegistry.invoke(Method m, Object thisObject, Object... arguments) |
| void | FileSinkOperator.jobClose(org.apache.hadoop.conf.Configuration hconf, boolean success) |
| void | Operator.jobClose(org.apache.hadoop.conf.Configuration conf, boolean success): Unlike the other operator interfaces, which are called from map or reduce tasks, jobClose is called from the job client side once the job has completed. |
| static void | ExecDriver.main(String[] args) |
| protected GenericUDAFEvaluator.AggregationBuffer[] | GroupByOperator.newAggregations() |
| void | Operator.process(Object row, int tag): Process the row. |
| void | MapOperator.process(org.apache.hadoop.io.Writable value) |
| void | FileSinkOperator.processOp(Object row, int tag) |
| void | UnionOperator.processOp(Object row, int tag) |
| void | UDTFOperator.processOp(Object row, int tag) |
| void | TableScanOperator.processOp(Object row, int tag): Currently, the table scan operator does nothing special other than forward the row. |
| void | SelectOperator.processOp(Object row, int tag) |
| void | ScriptOperator.processOp(Object row, int tag) |
| void | ReduceSinkOperator.processOp(Object row, int tag) |
| void | MapJoinOperator.processOp(Object row, int tag) |
| void | LimitOperator.processOp(Object row, int tag) |
| void | LateralViewJoinOperator.processOp(Object row, int tag): An important assumption for processOp() is that for a given row from the TS, the LVJ will first get the row from the left select operator, followed by all the corresponding rows from the UDTF operator. |
| void | JoinOperator.processOp(Object row, int tag) |
| void | GroupByOperator.processOp(Object row, int tag) |
| void | ForwardOperator.processOp(Object row, int tag) |
| void | FilterOperator.processOp(Object row, int tag) |
| void | ExtractOperator.processOp(Object row, int tag) |
| void | MapOperator.processOp(Object row, int tag) |
| void | CollectOperator.processOp(Object row, int tag) |
| abstract void | Operator.processOp(Object row, int tag): Process the row. |
| static void | Utilities.rename(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path src, org.apache.hadoop.fs.Path dst): Rename src to dst, or if dst already exists, move the files in src to dst. |
| static void | Utilities.renameOrMoveFiles(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path src, org.apache.hadoop.fs.Path dst): Rename src to dst, or if dst already exists, move the files in src to dst. |
| protected void | GroupByOperator.resetAggregations(GenericUDAFEvaluator.AggregationBuffer[] aggs) |
| void | MapOperator.setChildren(org.apache.hadoop.conf.Configuration hconf) |
| void | GroupByOperator.startGroup() |
| void | CommonJoinOperator.startGroup() |
| void | Operator.startGroup() |
| static void | FunctionRegistry.unregisterTemporaryUDF(String functionName) |
| protected void | GroupByOperator.updateAggregations(GenericUDAFEvaluator.AggregationBuffer[] aggs, Object row, ObjectInspector rowInspector, boolean hashAggr, boolean newEntryForHashAggr, Object[][] lastInvoke) |
| Constructors in org.apache.hadoop.hive.ql.exec that throw HiveException |
|---|
| ExecDriver(mapredWork plan, org.apache.hadoop.mapred.JobConf job, boolean isSilent): Constructor/initialization for invocation as an independent utility. |
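
Taken together, the exec entries above trace the operator lifecycle: initialize(...) once, process(...) per row, and close(...) at the end, with HiveException signalling failure at any stage. The following is a minimal driver-side sketch of that contract; the operator instance, input inspectors, and row source are assumed to come from an existing query plan, and this is not how Hive's own ExecMapper actually wires operators up.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.ql.exec.Operator;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;

public class OperatorLifecycleSketch {
  // Drives a single operator through the lifecycle described in the table
  // above: initialize once, process each row, then close. The operator and
  // row source are placeholders for objects the caller already has.
  static void run(Operator<?> op, Configuration hconf,
      ObjectInspector[] inputOIs, Iterable<Object> rows) {
    boolean abort = false;
    try {
      op.initialize(hconf, inputOIs);   // throws HiveException on bad setup
      for (Object row : rows) {
        op.process(row, 0);             // tag 0: row comes from parent 0
      }
    } catch (HiveException e) {
      abort = true;                     // tell closeOp() the run failed
    } finally {
      try {
        op.close(abort);                // operator-specific closeOp() runs here
      } catch (HiveException ignored) {
        // best-effort cleanup in this sketch
      }
    }
  }
}
```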
Uses of HiveException in org.apache.hadoop.hive.ql.exec.persistence
---

| Methods in org.apache.hadoop.hive.ql.exec.persistence that throw HiveException | |
|---|---|
| void | RowContainer.add(Row t) |
| void | HashMapWrapper.clear(): Clean up the hash table. |
| void | RowContainer.clear(): Remove all elements in the RowContainer. |
| void | HashMapWrapper.close(): Close the persistent hash table and clean it up. |
| V | HashMapWrapper.get(K key): Get the value based on the key. |
| void | HashMapWrapper.put(K key, V value): Put the key-value pair in the hash table. |
| void | HashMapWrapper.remove(Object key): Remove one key-value pair from the hash table based on the given key. |
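
HashMapWrapper is a hash table with a persistent backing store, which is why even simple accessors above are declared to throw HiveException. A minimal usage sketch, assuming a no-argument constructor (any in-memory threshold tuning parameters are not shown in this listing):

```java
import org.apache.hadoop.hive.ql.exec.persistence.HashMapWrapper;
import org.apache.hadoop.hive.ql.metadata.HiveException;

public class HashMapWrapperSketch {
  public static void main(String[] args) throws HiveException {
    // String keys and values here are purely for illustration.
    HashMapWrapper<String, String> map = new HashMapWrapper<String, String>();
    try {
      map.put("k1", "v1");              // put the key-value pair in the table
      System.out.println(map.get("k1")); // get the value based on the key
      map.remove("k1");                 // remove the pair for the given key
      map.clear();                      // clean up the hash table
    } finally {
      map.close();                      // close the persistent backing store
    }
  }
}
```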
Uses of HiveException in org.apache.hadoop.hive.ql.io
---

| Methods in org.apache.hadoop.hive.ql.io that throw HiveException | |
|---|---|
| static boolean | HiveFileFormatUtils.checkInputFormat(org.apache.hadoop.fs.FileSystem fs, HiveConf conf, Class<? extends org.apache.hadoop.mapred.InputFormat> inputFormatCls, ArrayList<org.apache.hadoop.fs.FileStatus> files): Checks whether the files are in the same format as the given input format. |
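
A hedged sketch of a call site for the method above, built only from the signature in the table; the path is a placeholder, and the HiveConf constructor taking a Class is assumed:

```java
import java.util.ArrayList;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
import org.apache.hadoop.mapred.SequenceFileInputFormat;

public class CheckInputFormatSketch {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf(CheckInputFormatSketch.class);
    FileSystem fs = FileSystem.get(conf);
    // "/tmp/example" is a placeholder path, not from the original listing.
    ArrayList<FileStatus> files = new ArrayList<FileStatus>();
    for (FileStatus stat : fs.listStatus(new Path("/tmp/example"))) {
      files.add(stat);
    }
    // Returns false when the files do not match the candidate input format;
    // throws HiveException when the check itself cannot be performed.
    boolean ok = HiveFileFormatUtils.checkInputFormat(
        fs, conf, SequenceFileInputFormat.class, files);
    System.out.println("matches SequenceFile format: " + ok);
  }
}
```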
Uses of HiveException in org.apache.hadoop.hive.ql.metadata
---

| Subclasses of HiveException in org.apache.hadoop.hive.ql.metadata | |
|---|---|
| class | InvalidTableException: Exception thrown when the requested table is invalid or does not exist. |
| Methods in org.apache.hadoop.hive.ql.metadata that throw HiveException | |
|---|---|
| void | Hive.alterPartition(String tblName, Partition newPart): Updates the existing partition metadata with the new metadata. |
| void | Hive.alterTable(String tblName, Table newTbl): Updates the existing table metadata with the new metadata. |
| void | HiveMetaStoreChecker.checkMetastore(String dbName, String tableName, List<Map<String,String>> partitions, CheckResult result): Check the metastore for inconsistencies: data missing from either the metastore or the DFS. |
| void | Table.checkValidity() |
| Table | Table.copy() |
| protected void | Table.copyFiles(org.apache.hadoop.fs.Path srcf): Inserts the specified files into the partition. |
| protected static void | Hive.copyFiles(org.apache.hadoop.fs.Path srcf, org.apache.hadoop.fs.Path destf, org.apache.hadoop.fs.FileSystem fs) |
| Partition | Hive.createPartition(Table tbl, Map<String,String> partSpec): Creates a partition. |
| Partition | Hive.createPartition(Table tbl, Map<String,String> partSpec, org.apache.hadoop.fs.Path location): Creates a partition. |
| void | Hive.createTable(String tableName, List<String> columns, List<String> partCols, Class<? extends org.apache.hadoop.mapred.InputFormat> fileInputFormat, Class<?> fileOutputFormat): Creates the table metadata and the directory for the table data. |
| void | Hive.createTable(String tableName, List<String> columns, List<String> partCols, Class<? extends org.apache.hadoop.mapred.InputFormat> fileInputFormat, Class<?> fileOutputFormat, int bucketCount, List<String> bucketCols): Creates the table metadata and the directory for the table data. |
| void | Hive.createTable(Table tbl): Creates the table with the given objects. |
| void | Hive.createTable(Table tbl, boolean ifNotExists): Creates the table with the given objects. |
| boolean | Hive.dropPartition(String db_name, String tbl_name, List<String> part_vals, boolean deleteData) |
| void | Hive.dropTable(String dbName, String tableName): Drops the table along with the data in it. |
| void | Hive.dropTable(String dbName, String tableName, boolean deleteData, boolean ignoreUnknownTab): Drops the table. |
| static Hive | Hive.get() |
| static Hive | Hive.get(HiveConf c): Gets the Hive object for the current thread. |
| static Hive | Hive.get(HiveConf c, boolean needsRefresh): Gets a connection to the metastore. |
| List<String> | Hive.getAllTables() |
| static List<FieldSchema> | Hive.getFieldsFromDeserializer(String name, Deserializer serde) |
| Class<? extends org.apache.hadoop.mapred.InputFormat> | Partition.getInputFormatClass() |
| Class<? extends HiveOutputFormat> | Partition.getOutputFormatClass() |
| Partition | Hive.getPartition(Table tbl, Map<String,String> partSpec, boolean forceCreate): Returns partition metadata. |
| List<String> | Hive.getPartitionNames(String dbName, String tblName, short max) |
| List<Partition> | Hive.getPartitions(Table tbl): Gets all the partitions that the table has. |
| org.apache.hadoop.fs.Path[] | Partition.getPath(Sample s) |
| Table | Hive.getTable(String dbName, String tableName): Returns metadata of the table. |
| Table | Hive.getTable(String dbName, String tableName, boolean throwException): Returns metadata of the table. |
| List<String> | Hive.getTablesByPattern(String tablePattern): Returns all existing tables from the default database that match the given pattern. |
| List<String> | Hive.getTablesForDb(String database, String tablePattern): Returns all existing tables from the given database that match the given pattern. |
| protected void | Partition.initSerDe() |
| protected void | Table.initSerDe() |
| boolean | Table.isValidSpec(Map<String,String> spec) |
| void | Hive.loadPartition(org.apache.hadoop.fs.Path loadPath, String tableName, AbstractMap<String,String> partSpec, boolean replace, org.apache.hadoop.fs.Path tmpDirPath): Loads a directory into a Hive table partition, altering the existing content of the partition with the contents of loadPath. |
| void | Hive.loadTable(org.apache.hadoop.fs.Path loadPath, String tableName, boolean replace, org.apache.hadoop.fs.Path tmpDirPath): Loads a directory into a Hive table. |
| void | Table.reinitSerDe() |
| protected void | Table.replaceFiles(org.apache.hadoop.fs.Path srcf, org.apache.hadoop.fs.Path tmpd): Replaces files in the partition with the new data set specified by srcf. |
| protected static void | Hive.replaceFiles(org.apache.hadoop.fs.Path srcf, org.apache.hadoop.fs.Path destf, org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path tmppath): Replaces files in the partition with the new data set specified by srcf. |
| void | Table.setBucketCols(List<String> bucketCols) |
| void | Table.setInputFormatClass(String name) |
| void | Table.setOutputFormatClass(String name) |
| void | Table.setSortCols(List<Order> sortOrder) |
| Constructors in org.apache.hadoop.hive.ql.metadata that throw HiveException |
|---|
| Partition(Table tbl, Map<String,String> partSpec, org.apache.hadoop.fs.Path location): Create a partition object with the given info. |
| Partition(Table tbl, Partition tp) |
| Sample(int num, int fraction, Dimension d) |
| Table(): Only used internally. |
| Table(String name, Properties schema, Deserializer deserializer, Class<? extends org.apache.hadoop.mapred.InputFormat<?,?>> inputFormatClass, Class<?> outputFormatClass, URI dataLocation, Hive hive): Create a Table object, presumably with the intent of saving it to the metastore. |
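
The Hive class above is the per-thread metastore client: Hive.get(...) returns the handle for the current thread, and the createTable/loadTable/dropTable family performs DDL and data loading, each signalling failure with HiveException. A small sketch under those signatures; the table name, columns, and format classes are chosen purely for illustration:

```java
import java.util.Arrays;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;

public class HiveClientSketch {
  public static void main(String[] args) throws HiveException {
    // Hive.get(...) hands back the metastore connection for this thread.
    Hive db = Hive.get(new HiveConf(HiveClientSketch.class));
    // Creates the table metadata and the directory for the table data.
    // TextOutputFormat is used here only to satisfy the Class<?> parameter.
    db.createTable("example_tbl",
        Arrays.asList("id", "name"),          // columns
        Arrays.asList("ds"),                  // partition columns
        TextInputFormat.class,
        TextOutputFormat.class);
    System.out.println(db.getTablesByPattern("example.*"));
    db.dropTable("default", "example_tbl");   // drops the table and its data
  }
}
```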
Uses of HiveException in org.apache.hadoop.hive.ql.optimizer.ppr
---

| Methods in org.apache.hadoop.hive.ql.optimizer.ppr that throw HiveException | |
|---|---|
| static PrunedPartitionList | PartitionPruner.prune(Table tab, exprNodeDesc prunerExpr, HiveConf conf, String alias, Map<String,PrunedPartitionList> prunedPartitionsMap): Get the partition list for the table that satisfies the partition pruner condition. |
Uses of HiveException in org.apache.hadoop.hive.ql.parse
---

| Subclasses of HiveException in org.apache.hadoop.hive.ql.parse | |
|---|---|
| class | SemanticException: Exception thrown by the semantic analyzer. |
Uses of HiveException in org.apache.hadoop.hive.ql.plan
---

| Constructors in org.apache.hadoop.hive.ql.plan that throw HiveException |
|---|
| partitionDesc(Partition part) |
Uses of HiveException in org.apache.hadoop.hive.ql.udf.generic
---

| Methods in org.apache.hadoop.hive.ql.udf.generic that throw HiveException | |
|---|---|
| void | GenericUDAFEvaluator.aggregate(GenericUDAFEvaluator.AggregationBuffer agg, Object[] parameters): Called by GroupByOperator when it sees a new input row. |
| void | GenericUDTFExplode.close() |
| abstract void | GenericUDTF.close(): Called to notify the UDTF that there are no more rows to process. |
| void | UDTFCollector.collect(Object input) |
| void | Collector.collect(Object input): Other classes call collect() with the data they have. |
| Object | GenericUDAFEvaluator.evaluate(GenericUDAFEvaluator.AggregationBuffer agg): Called by GroupByOperator to obtain the aggregation result. |
| Object | GenericUDFWhen.evaluate(GenericUDF.DeferredObject[] arguments) |
| Object | GenericUDFSplit.evaluate(GenericUDF.DeferredObject[] arguments) |
| Object | GenericUDFSize.evaluate(GenericUDF.DeferredObject[] arguments) |
| Object | GenericUDFOPNull.evaluate(GenericUDF.DeferredObject[] arguments) |
| Object | GenericUDFOPNotNull.evaluate(GenericUDF.DeferredObject[] arguments) |
| Object | GenericUDFMap.evaluate(GenericUDF.DeferredObject[] arguments) |
| Object | GenericUDFLocate.evaluate(GenericUDF.DeferredObject[] arguments) |
| Object | GenericUDFInstr.evaluate(GenericUDF.DeferredObject[] arguments) |
| Object | GenericUDFIndex.evaluate(GenericUDF.DeferredObject[] arguments) |
| Object | GenericUDFIf.evaluate(GenericUDF.DeferredObject[] arguments) |
| Object | GenericUDFHash.evaluate(GenericUDF.DeferredObject[] arguments) |
| Object | GenericUDFField.evaluate(GenericUDF.DeferredObject[] arguments) |
| Object | GenericUDFElt.evaluate(GenericUDF.DeferredObject[] arguments) |
| Object | GenericUDFConcatWS.evaluate(GenericUDF.DeferredObject[] arguments) |
| Object | GenericUDFCoalesce.evaluate(GenericUDF.DeferredObject[] arguments) |
| Object | GenericUDFCase.evaluate(GenericUDF.DeferredObject[] arguments) |
| Object | GenericUDFBridge.evaluate(GenericUDF.DeferredObject[] arguments) |
| Object | GenericUDFArray.evaluate(GenericUDF.DeferredObject[] arguments) |
| abstract Object | GenericUDF.evaluate(GenericUDF.DeferredObject[] arguments): Evaluate the GenericUDF with the arguments. |
| protected void | GenericUDTF.forward(Object o): Passes an output row to the collector. |
| Object | GenericUDF.DeferredObject.get() |
| GenericUDAFEvaluator.AggregationBuffer | GenericUDAFSum.GenericUDAFSumDouble.getNewAggregationBuffer() |
| GenericUDAFEvaluator.AggregationBuffer | GenericUDAFSum.GenericUDAFSumLong.getNewAggregationBuffer() |
| GenericUDAFEvaluator.AggregationBuffer | GenericUDAFVariance.GenericUDAFVarianceEvaluator.getNewAggregationBuffer() |
| GenericUDAFEvaluator.AggregationBuffer | GenericUDAFCount.GenericUDAFCountEvaluator.getNewAggregationBuffer() |
| GenericUDAFEvaluator.AggregationBuffer | GenericUDAFAverage.GenericUDAFAverageEvaluator.getNewAggregationBuffer() |
| abstract GenericUDAFEvaluator.AggregationBuffer | GenericUDAFEvaluator.getNewAggregationBuffer(): Get a new aggregation object. |
| ObjectInspector | GenericUDAFSum.GenericUDAFSumDouble.init(GenericUDAFEvaluator.Mode m, ObjectInspector[] parameters) |
| ObjectInspector | GenericUDAFSum.GenericUDAFSumLong.init(GenericUDAFEvaluator.Mode m, ObjectInspector[] parameters) |
| ObjectInspector | GenericUDAFVariance.GenericUDAFVarianceEvaluator.init(GenericUDAFEvaluator.Mode m, ObjectInspector[] parameters) |
| ObjectInspector | GenericUDAFCount.GenericUDAFCountEvaluator.init(GenericUDAFEvaluator.Mode m, ObjectInspector[] parameters) |
| ObjectInspector | GenericUDAFBridge.GenericUDAFBridgeEvaluator.init(GenericUDAFEvaluator.Mode m, ObjectInspector[] parameters) |
| ObjectInspector | GenericUDAFAverage.GenericUDAFAverageEvaluator.init(GenericUDAFEvaluator.Mode m, ObjectInspector[] parameters) |
| ObjectInspector | GenericUDAFEvaluator.init(GenericUDAFEvaluator.Mode m, ObjectInspector[] parameters): Initialize the evaluator. |
| void | GenericUDAFSum.GenericUDAFSumDouble.iterate(GenericUDAFEvaluator.AggregationBuffer agg, Object[] parameters) |
| void | GenericUDAFSum.GenericUDAFSumLong.iterate(GenericUDAFEvaluator.AggregationBuffer agg, Object[] parameters) |
| void | GenericUDAFVariance.GenericUDAFVarianceEvaluator.iterate(GenericUDAFEvaluator.AggregationBuffer agg, Object[] parameters) |
| void | GenericUDAFCount.GenericUDAFCountEvaluator.iterate(GenericUDAFEvaluator.AggregationBuffer agg, Object[] parameters) |
| void | GenericUDAFBridge.GenericUDAFBridgeEvaluator.iterate(GenericUDAFEvaluator.AggregationBuffer agg, Object[] parameters) |
| void | GenericUDAFAverage.GenericUDAFAverageEvaluator.iterate(GenericUDAFEvaluator.AggregationBuffer agg, Object[] parameters) |
| abstract void | GenericUDAFEvaluator.iterate(GenericUDAFEvaluator.AggregationBuffer agg, Object[] parameters): Iterate through original data. |
| void | GenericUDAFSum.GenericUDAFSumDouble.merge(GenericUDAFEvaluator.AggregationBuffer agg, Object partial) |
| void | GenericUDAFSum.GenericUDAFSumLong.merge(GenericUDAFEvaluator.AggregationBuffer agg, Object partial) |
| void | GenericUDAFVariance.GenericUDAFVarianceEvaluator.merge(GenericUDAFEvaluator.AggregationBuffer agg, Object partial) |
| void | GenericUDAFCount.GenericUDAFCountEvaluator.merge(GenericUDAFEvaluator.AggregationBuffer agg, Object partial) |
| void | GenericUDAFBridge.GenericUDAFBridgeEvaluator.merge(GenericUDAFEvaluator.AggregationBuffer agg, Object partial) |
| void | GenericUDAFAverage.GenericUDAFAverageEvaluator.merge(GenericUDAFEvaluator.AggregationBuffer agg, Object partial) |
| abstract void | GenericUDAFEvaluator.merge(GenericUDAFEvaluator.AggregationBuffer agg, Object partial): Merge with a partial aggregation result. |
| void | GenericUDTFExplode.process(Object[] o) |
| abstract void | GenericUDTF.process(Object[] args): Give a set of arguments for the UDTF to process. |
| void | GenericUDAFSum.GenericUDAFSumDouble.reset(GenericUDAFEvaluator.AggregationBuffer agg) |
| void | GenericUDAFSum.GenericUDAFSumLong.reset(GenericUDAFEvaluator.AggregationBuffer agg) |
| void | GenericUDAFVariance.GenericUDAFVarianceEvaluator.reset(GenericUDAFEvaluator.AggregationBuffer agg) |
| void | GenericUDAFCount.GenericUDAFCountEvaluator.reset(GenericUDAFEvaluator.AggregationBuffer agg) |
| void | GenericUDAFBridge.GenericUDAFBridgeEvaluator.reset(GenericUDAFEvaluator.AggregationBuffer agg) |
| void | GenericUDAFAverage.GenericUDAFAverageEvaluator.reset(GenericUDAFEvaluator.AggregationBuffer agg) |
| abstract void | GenericUDAFEvaluator.reset(GenericUDAFEvaluator.AggregationBuffer agg): Reset the aggregation. |
| Object | GenericUDAFStd.GenericUDAFStdEvaluator.terminate(GenericUDAFEvaluator.AggregationBuffer agg) |
| Object | GenericUDAFVarianceSample.GenericUDAFVarianceSampleEvaluator.terminate(GenericUDAFEvaluator.AggregationBuffer agg) |
| Object | GenericUDAFSum.GenericUDAFSumDouble.terminate(GenericUDAFEvaluator.AggregationBuffer agg) |
| Object | GenericUDAFSum.GenericUDAFSumLong.terminate(GenericUDAFEvaluator.AggregationBuffer agg) |
| Object | GenericUDAFStdSample.GenericUDAFStdSampleEvaluator.terminate(GenericUDAFEvaluator.AggregationBuffer agg) |
| Object | GenericUDAFVariance.GenericUDAFVarianceEvaluator.terminate(GenericUDAFEvaluator.AggregationBuffer agg) |
| Object | GenericUDAFCount.GenericUDAFCountEvaluator.terminate(GenericUDAFEvaluator.AggregationBuffer agg) |
| Object | GenericUDAFBridge.GenericUDAFBridgeEvaluator.terminate(GenericUDAFEvaluator.AggregationBuffer agg) |
| Object | GenericUDAFAverage.GenericUDAFAverageEvaluator.terminate(GenericUDAFEvaluator.AggregationBuffer agg) |
| abstract Object | GenericUDAFEvaluator.terminate(GenericUDAFEvaluator.AggregationBuffer agg): Get the final aggregation result. |
| Object | GenericUDAFSum.GenericUDAFSumDouble.terminatePartial(GenericUDAFEvaluator.AggregationBuffer agg) |
| Object | GenericUDAFSum.GenericUDAFSumLong.terminatePartial(GenericUDAFEvaluator.AggregationBuffer agg) |
| Object | GenericUDAFVariance.GenericUDAFVarianceEvaluator.terminatePartial(GenericUDAFEvaluator.AggregationBuffer agg) |
| Object | GenericUDAFCount.GenericUDAFCountEvaluator.terminatePartial(GenericUDAFEvaluator.AggregationBuffer agg) |
| Object | GenericUDAFBridge.GenericUDAFBridgeEvaluator.terminatePartial(GenericUDAFEvaluator.AggregationBuffer agg) |
| Object | GenericUDAFAverage.GenericUDAFAverageEvaluator.terminatePartial(GenericUDAFEvaluator.AggregationBuffer agg) |
| abstract Object | GenericUDAFEvaluator.terminatePartial(GenericUDAFEvaluator.AggregationBuffer agg): Get the partial aggregation result. |
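
The GenericUDAFEvaluator rows above define a strict lifecycle: init for a mode, getNewAggregationBuffer and reset, then iterate over original rows (map side) or merge over partials (reduce side), finishing with terminatePartial or terminate. Below is a minimal sketch of the map-side half, assuming the evaluator and parameter inspectors were obtained from the query plan (for example via the function registry):

```java
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;

public class UdafLifecycleSketch {
  // Drives one evaluator through the map-side half of the lifecycle laid
  // out in the table above. The evaluator and inspectors are placeholders
  // for objects the caller already has; rows is any source of parameter
  // arrays, one array per input row.
  static Object mapSideAggregate(GenericUDAFEvaluator eval,
      ObjectInspector[] paramOIs, Iterable<Object[]> rows)
      throws HiveException {
    eval.init(GenericUDAFEvaluator.Mode.PARTIAL1, paramOIs);
    GenericUDAFEvaluator.AggregationBuffer buf = eval.getNewAggregationBuffer();
    eval.reset(buf);                     // start from an empty aggregation
    for (Object[] parameters : rows) {
      eval.iterate(buf, parameters);     // consume original rows
    }
    return eval.terminatePartial(buf);   // partial result for the reducers
  }
}
```

The reduce-side half is symmetric: init with Mode.FINAL, merge each partial result into the buffer, then call terminate for the final aggregation result.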