Uses of Class
org.apache.hadoop.hive.ql.metadata.HiveException

Packages that use HiveException
org.apache.hadoop.hive.ql.exec   
org.apache.hadoop.hive.ql.io   
org.apache.hadoop.hive.ql.metadata   
org.apache.hadoop.hive.ql.optimizer.ppr   
org.apache.hadoop.hive.ql.parse   
org.apache.hadoop.hive.ql.session   
org.apache.hadoop.hive.ql.udf.generic   
 

Uses of HiveException in org.apache.hadoop.hive.ql.exec
 

Subclasses of HiveException in org.apache.hadoop.hive.ql.exec
 class AmbiguousMethodException
          Exception thrown by the UDF and UDAF method resolvers when a unique matching method cannot be found.
 class UDFArgumentException
          Exception thrown when a UDF argument is invalid.
 class UDFArgumentLengthException
          Exception thrown when a UDF is called with the wrong number of arguments.
 class UDFArgumentTypeException
          Exception thrown when a UDF is called with arguments of the wrong types.
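
These argument exceptions are normally raised from a UDF's argument-checking code during query compilation. The sketch below is a hypothetical identity UDF (not a Hive built-in) written against the GenericUDF API listed further down on this page; it shows where UDFArgumentLengthException and UDFArgumentTypeException would typically be thrown.

    import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
    import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException;
    import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;

    // Hypothetical example, not part of Hive: returns its single primitive
    // argument unchanged, throwing argument exceptions otherwise.
    public class GenericUDFIdentity extends GenericUDF {

      public ObjectInspector initialize(ObjectInspector[] arguments)
          throws UDFArgumentException {
        if (arguments.length != 1) {
          // Wrong number of arguments.
          throw new UDFArgumentLengthException("identity takes exactly one argument");
        }
        if (arguments[0].getCategory() != Category.PRIMITIVE) {
          // Wrong argument type; the int identifies the offending argument (0-based).
          throw new UDFArgumentTypeException(0, "identity only accepts primitive types");
        }
        return arguments[0];
      }

      public Object evaluate(DeferredObject[] arguments) throws HiveException {
        // DeferredObject.get() may itself throw HiveException.
        return arguments[0].get();
      }

      public String getDisplayString(String[] children) {
        return "identity(" + children[0] + ")";
      }
    }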
 

Methods in org.apache.hadoop.hive.ql.exec that throw HiveException
protected  void CommonJoinOperator.checkAndGenObject()
           
 void FetchOperator.clearFetchContext()
          Clear the context, if anything needs to be done.
 void ScriptOperator.close(boolean abort)
           
 void Operator.close(boolean abort)
           
 void FileSinkOperator.closeOp(boolean abort)
           
 void MapJoinOperator.closeOp(boolean abort)
           
 void GroupByOperator.closeOp(boolean abort)
          We need to forward all the aggregations to children.
 void MapOperator.closeOp(boolean abort)
          Close extra child operators that are initialized but are not executed.
 void CommonJoinOperator.closeOp(boolean abort)
          Called once all input rows have been processed.
protected  void Operator.closeOp(boolean abort)
          Operator specific close routine.
protected static ArrayList<Object> CommonJoinOperator.computeValues(Object row, List<ExprNodeEvaluator> valueFields, List<ObjectInspector> valueFieldsOI)
          Evaluate the value expressions on a row and return the results as standard objects.
 void GroupByOperator.endGroup()
           
 void CommonJoinOperator.endGroup()
          Forward a record of join results.
 void Operator.endGroup()
           
 Object ExprNodeNullEvaluator.evaluate(Object row)
           
 Object ExprNodeGenericFuncEvaluator.evaluate(Object row)
           
 Object ExprNodeFuncEvaluator.evaluate(Object row)
           
 Object ExprNodeFieldEvaluator.evaluate(Object row)
           
 Object ExprNodeConstantEvaluator.evaluate(Object row)
           
 Object ExprNodeColumnEvaluator.evaluate(Object row)
           
abstract  Object ExprNodeEvaluator.evaluate(Object row)
          Evaluate the expression given the row.
protected  void GroupByOperator.forward(ArrayList<Object> keys, GenericUDAFEvaluator.AggregationBuffer[] aggs)
          Forward a record of keys and aggregation results.
protected  void Operator.forward(Object row, ObjectInspector rowInspector)
           
protected static HashMap<Byte,List<ObjectInspector>> CommonJoinOperator.getObjectInspectorsFromEvaluators(Map<Byte,List<ExprNodeEvaluator>> exprEntries, ObjectInspector[] inputObjInspector)
           
 ObjectInspector FetchOperator.getOutputObjectInspector()
           
static FileSinkOperator.RecordWriter FileSinkOperator.getRecordWriter(org.apache.hadoop.mapred.JobConf jc, HiveOutputFormat<?,?> hiveOutputFormat, Class<? extends org.apache.hadoop.io.Writable> valueClass, boolean isCompressed, Properties tableProp, org.apache.hadoop.fs.Path outPath)
           
protected static ObjectInspector[] Operator.initEvaluators(ExprNodeEvaluator[] evals, ObjectInspector rowInspector)
          Initialize an array of ExprNodeEvaluator and return the result ObjectInspectors.
protected static StructObjectInspector Operator.initEvaluatorsAndReturnStruct(ExprNodeEvaluator[] evals, List<String> outputColName, ObjectInspector rowInspector)
          Initialize an array of ExprNodeEvaluator and put the return values into a StructObjectInspector with integer field names.
 void Operator.initialize(org.apache.hadoop.conf.Configuration hconf, ObjectInspector[] inputOIs)
          Initializes operators only if all parents have been initialized.
 ObjectInspector ExprNodeNullEvaluator.initialize(ObjectInspector rowInspector)
           
 ObjectInspector ExprNodeGenericFuncEvaluator.initialize(ObjectInspector rowInspector)
           
 ObjectInspector ExprNodeFuncEvaluator.initialize(ObjectInspector rowInspector)
           
 ObjectInspector ExprNodeFieldEvaluator.initialize(ObjectInspector rowInspector)
           
 ObjectInspector ExprNodeConstantEvaluator.initialize(ObjectInspector rowInspector)
           
 ObjectInspector ExprNodeColumnEvaluator.initialize(ObjectInspector rowInspector)
           
abstract  ObjectInspector ExprNodeEvaluator.initialize(ObjectInspector rowInspector)
          Initialize should be called once and only once.
 void MapOperator.initializeAsRoot(org.apache.hadoop.conf.Configuration hconf, mapredWork mrwork)
          Initializes this map op as the root of the tree.
protected  void Operator.initializeChildren(org.apache.hadoop.conf.Configuration hconf)
          Calls initialize on each of the children with outputObjInspector as the output row format.
protected  void ReduceSinkOperator.initializeOp(org.apache.hadoop.conf.Configuration hconf)
           
protected  void FileSinkOperator.initializeOp(org.apache.hadoop.conf.Configuration hconf)
           
protected  void UnionOperator.initializeOp(org.apache.hadoop.conf.Configuration hconf)
          UnionOperator will transform the input rows if the inputObjInspectors from different parents are different.
protected  void SelectOperator.initializeOp(org.apache.hadoop.conf.Configuration hconf)
           
protected  void ScriptOperator.initializeOp(org.apache.hadoop.conf.Configuration hconf)
           
protected  void MapJoinOperator.initializeOp(org.apache.hadoop.conf.Configuration hconf)
           
protected  void LimitOperator.initializeOp(org.apache.hadoop.conf.Configuration hconf)
           
protected  void JoinOperator.initializeOp(org.apache.hadoop.conf.Configuration hconf)
           
protected  void GroupByOperator.initializeOp(org.apache.hadoop.conf.Configuration hconf)
           
protected  void FilterOperator.initializeOp(org.apache.hadoop.conf.Configuration hconf)
           
protected  void ExtractOperator.initializeOp(org.apache.hadoop.conf.Configuration hconf)
           
 void MapOperator.initializeOp(org.apache.hadoop.conf.Configuration hconf)
           
protected  void CommonJoinOperator.initializeOp(org.apache.hadoop.conf.Configuration hconf)
           
protected  void CollectOperator.initializeOp(org.apache.hadoop.conf.Configuration hconf)
           
protected  void Operator.initializeOp(org.apache.hadoop.conf.Configuration hconf)
          Operator specific initialization.
static Object FunctionRegistry.invoke(Method m, Object thisObject, Object... arguments)
           
 void FileSinkOperator.jobClose(org.apache.hadoop.conf.Configuration hconf, boolean success)
           
 void Operator.jobClose(org.apache.hadoop.conf.Configuration conf, boolean success)
          Unlike other operator interfaces, which are called from map or reduce tasks, jobClose is called from the job client side once the job has completed.
static void ExecDriver.main(String[] args)
           
protected  GenericUDAFEvaluator.AggregationBuffer[] GroupByOperator.newAggregations()
           
 void ReduceSinkOperator.process(Object row, int tag)
           
 void FileSinkOperator.process(Object row, int tag)
           
 void UnionOperator.process(Object row, int tag)
           
 void TableScanOperator.process(Object row, int tag)
          Currently, the table scan operator does not do anything special other than just forwarding the row.
 void SelectOperator.process(Object row, int tag)
           
 void ScriptOperator.process(Object row, int tag)
           
 void MapJoinOperator.process(Object row, int tag)
           
 void LimitOperator.process(Object row, int tag)
           
 void JoinOperator.process(Object row, int tag)
           
 void GroupByOperator.process(Object row, int tag)
           
 void ForwardOperator.process(Object row, int tag)
           
 void FilterOperator.process(Object row, int tag)
           
 void ExtractOperator.process(Object row, int tag)
           
 void MapOperator.process(Object row, int tag)
           
 void CollectOperator.process(Object row, int tag)
           
abstract  void Operator.process(Object row, int tag)
          Process the row.
 void MapOperator.process(org.apache.hadoop.io.Writable value)
           
static void Utilities.rename(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path src, org.apache.hadoop.fs.Path dst)
          Rename src to dst, or, if dst already exists, move the files in src into dst.
static void Utilities.renameOrMoveFiles(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path src, org.apache.hadoop.fs.Path dst)
          Rename src to dst, or, if dst already exists, move the files in src into dst.
protected  void GroupByOperator.resetAggregations(GenericUDAFEvaluator.AggregationBuffer[] aggs)
           
 void MapOperator.setChildren(org.apache.hadoop.conf.Configuration hconf)
           
 void GroupByOperator.startGroup()
           
 void CommonJoinOperator.startGroup()
           
 void Operator.startGroup()
           
static void FunctionRegistry.unregisterTemporaryUDF(String functionName)
           
protected  void GroupByOperator.updateAggregations(GenericUDAFEvaluator.AggregationBuffer[] aggs, Object row, ObjectInspector rowInspector, boolean hashAggr, boolean newEntryForHashAggr, Object[][] lastInvoke)
           
 
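The ExprNodeEvaluator methods listed above follow a simple contract: initialize(rowInspector) is called once and returns the ObjectInspector describing the result, after which evaluate(row) is called once per row; both may throw HiveException. A minimal sketch of that contract, assuming the evaluator and the row ObjectInspector are obtained elsewhere (for example from a compiled query plan):

    import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;

    public class ExprNodeEvaluatorSketch {
      // evaluator and rowInspector are assumed to come from the query plan;
      // this only illustrates call order and exception handling.
      static void evaluateAll(ExprNodeEvaluator evaluator,
                              ObjectInspector rowInspector,
                              Iterable<Object> rows) throws HiveException {
        // initialize() must be called once and only once.
        ObjectInspector outputOI = evaluator.initialize(rowInspector);
        for (Object row : rows) {
          Object result = evaluator.evaluate(row);  // may throw HiveException
          // ... interpret result using outputOI ...
        }
      }
    }
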

Constructors in org.apache.hadoop.hive.ql.exec that throw HiveException
ExecDriver(mapredWork plan, org.apache.hadoop.mapred.JobConf job, boolean isSilent)
          Constructor/initialization for invocation as an independent utility.
 

Uses of HiveException in org.apache.hadoop.hive.ql.io
 

Methods in org.apache.hadoop.hive.ql.io that throw HiveException
static boolean HiveFileFormatUtils.checkInputFormat(org.apache.hadoop.fs.FileSystem fs, HiveConf conf, Class<? extends org.apache.hadoop.mapred.InputFormat> inputFormatCls, ArrayList<org.apache.hadoop.fs.FileStatus> files)
          Checks whether the given files are in the same format as the given input format.
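
A small, hedged sketch of calling checkInputFormat: the directory is hypothetical and TextInputFormat merely stands in for whatever format a table declares. The call throws HiveException if the files cannot be checked.

    import java.util.ArrayList;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.mapred.TextInputFormat;

    public class FormatCheckSketch {
      // Returns true if every file under dir is readable as TextInputFormat.
      static boolean isTextFormat(HiveConf conf, Path dir)
          throws HiveException, java.io.IOException {
        FileSystem fs = dir.getFileSystem(conf);
        ArrayList<FileStatus> files = new ArrayList<FileStatus>();
        for (FileStatus stat : fs.listStatus(dir)) {
          files.add(stat);
        }
        return HiveFileFormatUtils.checkInputFormat(fs, conf, TextInputFormat.class, files);
      }
    }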
 

Uses of HiveException in org.apache.hadoop.hive.ql.metadata
 

Subclasses of HiveException in org.apache.hadoop.hive.ql.metadata
 class InvalidTableException
          Exception thrown when a referenced table cannot be found.
 

Methods in org.apache.hadoop.hive.ql.metadata that throw HiveException
 void Hive.alterTable(String tblName, Table newTbl)
          Updates the existing table metadata with the new metadata.
 void HiveMetaStoreChecker.checkMetastore(String dbName, String tableName, List<Map<String,String>> partitions, CheckResult result)
          Checks the metastore for inconsistencies: data missing either in the metastore or on the DFS.
 void Table.checkValidity()
           
protected  void Table.copyFiles(org.apache.hadoop.fs.Path srcf)
          Inserts the specified files into the partition.
protected static void Hive.copyFiles(org.apache.hadoop.fs.Path srcf, org.apache.hadoop.fs.Path destf, org.apache.hadoop.fs.FileSystem fs)
           
 Partition Hive.createPartition(Table tbl, Map<String,String> partSpec)
          Creates a partition.
 Partition Hive.createPartition(Table tbl, Map<String,String> partSpec, org.apache.hadoop.fs.Path location)
          Creates a partition.
 void Hive.createTable(String tableName, List<String> columns, List<String> partCols, Class<? extends org.apache.hadoop.mapred.InputFormat> fileInputFormat, Class<?> fileOutputFormat)
          Creates the table metadata and the directory for the table data.
 void Hive.createTable(String tableName, List<String> columns, List<String> partCols, Class<? extends org.apache.hadoop.mapred.InputFormat> fileInputFormat, Class<?> fileOutputFormat, int bucketCount, List<String> bucketCols)
          Creates the table metadata and the directory for the table data.
 void Hive.createTable(Table tbl)
          Creates the table with the given objects.
 void Hive.createTable(Table tbl, boolean ifNotExists)
          Creates the table with the given objects.
 boolean Hive.dropPartition(String db_name, String tbl_name, List<String> part_vals, boolean deleteData)
           
 void Hive.dropTable(String dbName, String tableName)
          Drops the table along with the data in it.
 void Hive.dropTable(String dbName, String tableName, boolean deleteData, boolean ignoreUnknownTab)
          Drops the table.
static Hive Hive.get()
           
static Hive Hive.get(HiveConf c)
          Gets the Hive object for the current thread.
static Hive Hive.get(HiveConf c, boolean needsRefresh)
          Gets a connection to the metastore.
 List<String> Hive.getAllTables()
           
static List<FieldSchema> Hive.getFieldsFromDeserializer(String name, Deserializer serde)
           
 Partition Hive.getPartition(Table tbl, Map<String,String> partSpec, boolean forceCreate)
          Returns the partition metadata.
 List<String> Hive.getPartitionNames(String dbName, String tblName, short max)
           
 List<Partition> Hive.getPartitions(Table tbl)
          Gets all the partitions of the table.
 org.apache.hadoop.fs.Path[] Partition.getPath(Sample s)
           
 Table Hive.getTable(String dbName, String tableName)
          Returns metadata of the table.
 Table Hive.getTable(String dbName, String tableName, boolean throwException)
          Returns the metadata of the table.
 List<String> Hive.getTablesByPattern(String tablePattern)
          Returns all existing tables that match the given pattern.
protected  List<String> Hive.getTablesForDb(String database, String tablePattern)
           
protected  void Table.initSerDe()
           
 boolean Table.isValidSpec(Map<String,String> spec)
           
 void Hive.loadPartition(org.apache.hadoop.fs.Path loadPath, String tableName, AbstractMap<String,String> partSpec, boolean replace, org.apache.hadoop.fs.Path tmpDirPath)
          Loads a directory into a Hive table partition; alters the existing content of the partition with the contents of loadPath.
 void Hive.loadTable(org.apache.hadoop.fs.Path loadPath, String tableName, boolean replace, org.apache.hadoop.fs.Path tmpDirPath)
          Loads a directory into a Hive table.
 void Table.reinitSerDe()
           
protected  void Table.replaceFiles(org.apache.hadoop.fs.Path srcf, org.apache.hadoop.fs.Path tmpd)
          Replaces files in the partition with the new data set specified by srcf.
protected static void Hive.replaceFiles(org.apache.hadoop.fs.Path srcf, org.apache.hadoop.fs.Path destf, org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path tmppath)
          Replaces files in the partition with the new data set specified by srcf.
 void Table.setBucketCols(List<String> bucketCols)
           
 void Table.setInputFormatClass(String name)
           
 void Table.setOutputFormatClass(String name)
           
 void Table.setSortCols(List<Order> sortOrder)
           
 
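The Hive class above is the thread-local façade over the metastore; Hive.get(), createTable(), getTable() and dropTable() all report failures as HiveException (with InvalidTableException for missing tables). A short sketch using a hypothetical table name and assuming whatever metastore the supplied HiveConf points at:

    import java.util.Arrays;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
    import org.apache.hadoop.hive.ql.metadata.Table;
    import org.apache.hadoop.mapred.SequenceFileInputFormat;
    import org.apache.hadoop.mapred.SequenceFileOutputFormat;

    public class MetadataSketch {
      public static void main(String[] args) {
        try {
          // Connects to the metastore configured for the current thread.
          Hive db = Hive.get(new HiveConf(MetadataSketch.class));

          // Hypothetical table: two columns, partitioned by "ds".
          db.createTable("example_tbl",
              Arrays.asList("key", "value"),
              Arrays.asList("ds"),
              SequenceFileInputFormat.class,
              SequenceFileOutputFormat.class);

          // tbl now holds the table metadata fetched back from the metastore.
          Table tbl = db.getTable("default", "example_tbl");

          db.dropTable("default", "example_tbl");
        } catch (InvalidTableException e) {
          // Thrown when a referenced table does not exist.
          System.err.println("no such table: " + e.getMessage());
        } catch (HiveException e) {
          // Any other metadata failure.
          e.printStackTrace();
        }
      }
    }
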

Constructors in org.apache.hadoop.hive.ql.metadata that throw HiveException
Partition(Table tbl, Map<String,String> partSpec, org.apache.hadoop.fs.Path location)
          Creates a partition object with the given information.
Partition(Table tbl, Partition tp)
           
Sample(int num, int fraction, Dimension d)
           
Table()
          Table (only used internally)
Table(String name, Properties schema, Deserializer deserializer, Class<? extends org.apache.hadoop.mapred.InputFormat<?,?>> inputFormatClass, Class<?> outputFormatClass, URI dataLocation, Hive hive)
          Creates a table metadata object, presumably with the intent of saving it to the metastore.
 

Uses of HiveException in org.apache.hadoop.hive.ql.optimizer.ppr
 

Methods in org.apache.hadoop.hive.ql.optimizer.ppr that throw HiveException
static PrunedPartitionList PartitionPruner.prune(Table tab, exprNodeDesc prunerExpr, HiveConf conf, String alias)
          Get the partition list for the table that satisfies the partition pruner condition.
 

Uses of HiveException in org.apache.hadoop.hive.ql.parse
 

Subclasses of HiveException in org.apache.hadoop.hive.ql.parse
 class SemanticException
          Exception from SemanticAnalyzer
 

Methods in org.apache.hadoop.hive.ql.parse that throw HiveException
 PrunedPartitionList ASTPartitionPruner.prune()
          Prunes the partitions using the table metadata and returns the partitions that remain.
 

Uses of HiveException in org.apache.hadoop.hive.ql.session
 

Methods in org.apache.hadoop.hive.ql.session that throw HiveException
 Hive SessionState.getDb()
           
 

Uses of HiveException in org.apache.hadoop.hive.ql.udf.generic
 

Methods in org.apache.hadoop.hive.ql.udf.generic that throw HiveException
 void GenericUDAFEvaluator.aggregate(GenericUDAFEvaluator.AggregationBuffer agg, Object[] parameters)
          This function will be called by GroupByOperator when it sees a new input row.
 Object GenericUDAFEvaluator.evaluate(GenericUDAFEvaluator.AggregationBuffer agg)
          This function will be called by GroupByOperator to obtain the aggregation result (partial or final, depending on the mode).
 Object GenericUDFWhen.evaluate(GenericUDF.DeferredObject[] arguments)
           
 Object GenericUDFSplit.evaluate(GenericUDF.DeferredObject[] arguments)
           
 Object GenericUDFSize.evaluate(GenericUDF.DeferredObject[] arguments)
           
 Object GenericUDFOPNull.evaluate(GenericUDF.DeferredObject[] arguments)
           
 Object GenericUDFOPNotNull.evaluate(GenericUDF.DeferredObject[] arguments)
           
 Object GenericUDFLocate.evaluate(GenericUDF.DeferredObject[] arguments)
           
 Object GenericUDFInstr.evaluate(GenericUDF.DeferredObject[] arguments)
           
 Object GenericUDFIndex.evaluate(GenericUDF.DeferredObject[] arguments)
           
 Object GenericUDFIf.evaluate(GenericUDF.DeferredObject[] arguments)
           
 Object GenericUDFHash.evaluate(GenericUDF.DeferredObject[] arguments)
           
 Object GenericUDFElt.evaluate(GenericUDF.DeferredObject[] arguments)
           
 Object GenericUDFCoalesce.evaluate(GenericUDF.DeferredObject[] arguments)
           
 Object GenericUDFCase.evaluate(GenericUDF.DeferredObject[] arguments)
           
abstract  Object GenericUDF.evaluate(GenericUDF.DeferredObject[] arguments)
          Evaluate the GenericUDF with the arguments.
 Object GenericUDF.DeferredObject.get()
           
 GenericUDAFEvaluator.AggregationBuffer GenericUDAFSum.GenericUDAFSumDouble.getNewAggregationBuffer()
           
 GenericUDAFEvaluator.AggregationBuffer GenericUDAFSum.GenericUDAFSumLong.getNewAggregationBuffer()
           
 GenericUDAFEvaluator.AggregationBuffer GenericUDAFVariance.GenericUDAFVarianceEvaluator.getNewAggregationBuffer()
           
 GenericUDAFEvaluator.AggregationBuffer GenericUDAFCount.GenericUDAFCountEvaluator.getNewAggregationBuffer()
           
 GenericUDAFEvaluator.AggregationBuffer GenericUDAFAverage.GenericUDAFAverageEvaluator.getNewAggregationBuffer()
           
abstract  GenericUDAFEvaluator.AggregationBuffer GenericUDAFEvaluator.getNewAggregationBuffer()
          Get a new aggregation object.
 ObjectInspector GenericUDAFSum.GenericUDAFSumDouble.init(GenericUDAFEvaluator.Mode m, ObjectInspector[] parameters)
           
 ObjectInspector GenericUDAFSum.GenericUDAFSumLong.init(GenericUDAFEvaluator.Mode m, ObjectInspector[] parameters)
           
 ObjectInspector GenericUDAFVariance.GenericUDAFVarianceEvaluator.init(GenericUDAFEvaluator.Mode m, ObjectInspector[] parameters)
           
 ObjectInspector GenericUDAFCount.GenericUDAFCountEvaluator.init(GenericUDAFEvaluator.Mode m, ObjectInspector[] parameters)
           
 ObjectInspector GenericUDAFBridge.GenericUDAFBridgeEvaluator.init(GenericUDAFEvaluator.Mode m, ObjectInspector[] parameters)
           
 ObjectInspector GenericUDAFAverage.GenericUDAFAverageEvaluator.init(GenericUDAFEvaluator.Mode m, ObjectInspector[] parameters)
           
 ObjectInspector GenericUDAFEvaluator.init(GenericUDAFEvaluator.Mode m, ObjectInspector[] parameters)
          Initialize the evaluator.
 void GenericUDAFSum.GenericUDAFSumDouble.iterate(GenericUDAFEvaluator.AggregationBuffer agg, Object[] parameters)
           
 void GenericUDAFSum.GenericUDAFSumLong.iterate(GenericUDAFEvaluator.AggregationBuffer agg, Object[] parameters)
           
 void GenericUDAFVariance.GenericUDAFVarianceEvaluator.iterate(GenericUDAFEvaluator.AggregationBuffer agg, Object[] parameters)
           
 void GenericUDAFCount.GenericUDAFCountEvaluator.iterate(GenericUDAFEvaluator.AggregationBuffer agg, Object[] parameters)
           
 void GenericUDAFBridge.GenericUDAFBridgeEvaluator.iterate(GenericUDAFEvaluator.AggregationBuffer agg, Object[] parameters)
           
 void GenericUDAFAverage.GenericUDAFAverageEvaluator.iterate(GenericUDAFEvaluator.AggregationBuffer agg, Object[] parameters)
           
abstract  void GenericUDAFEvaluator.iterate(GenericUDAFEvaluator.AggregationBuffer agg, Object[] parameters)
          Iterate through original data.
 void GenericUDAFSum.GenericUDAFSumDouble.merge(GenericUDAFEvaluator.AggregationBuffer agg, Object partial)
           
 void GenericUDAFSum.GenericUDAFSumLong.merge(GenericUDAFEvaluator.AggregationBuffer agg, Object partial)
           
 void GenericUDAFVariance.GenericUDAFVarianceEvaluator.merge(GenericUDAFEvaluator.AggregationBuffer agg, Object partial)
           
 void GenericUDAFCount.GenericUDAFCountEvaluator.merge(GenericUDAFEvaluator.AggregationBuffer agg, Object partial)
           
 void GenericUDAFBridge.GenericUDAFBridgeEvaluator.merge(GenericUDAFEvaluator.AggregationBuffer agg, Object partial)
           
 void GenericUDAFAverage.GenericUDAFAverageEvaluator.merge(GenericUDAFEvaluator.AggregationBuffer agg, Object partial)
           
abstract  void GenericUDAFEvaluator.merge(GenericUDAFEvaluator.AggregationBuffer agg, Object partial)
          Merge with partial aggregation result.
 void GenericUDAFSum.GenericUDAFSumDouble.reset(GenericUDAFEvaluator.AggregationBuffer agg)
           
 void GenericUDAFSum.GenericUDAFSumLong.reset(GenericUDAFEvaluator.AggregationBuffer agg)
           
 void GenericUDAFVariance.GenericUDAFVarianceEvaluator.reset(GenericUDAFEvaluator.AggregationBuffer agg)
           
 void GenericUDAFCount.GenericUDAFCountEvaluator.reset(GenericUDAFEvaluator.AggregationBuffer agg)
           
 void GenericUDAFBridge.GenericUDAFBridgeEvaluator.reset(GenericUDAFEvaluator.AggregationBuffer agg)
           
 void GenericUDAFAverage.GenericUDAFAverageEvaluator.reset(GenericUDAFEvaluator.AggregationBuffer agg)
           
abstract  void GenericUDAFEvaluator.reset(GenericUDAFEvaluator.AggregationBuffer agg)
          Reset the aggregation.
 Object GenericUDAFStd.GenericUDAFStdEvaluator.terminate(GenericUDAFEvaluator.AggregationBuffer agg)
           
 Object GenericUDAFVarianceSample.GenericUDAFVarianceSampleEvaluator.terminate(GenericUDAFEvaluator.AggregationBuffer agg)
           
 Object GenericUDAFSum.GenericUDAFSumDouble.terminate(GenericUDAFEvaluator.AggregationBuffer agg)
           
 Object GenericUDAFSum.GenericUDAFSumLong.terminate(GenericUDAFEvaluator.AggregationBuffer agg)
           
 Object GenericUDAFStdSample.GenericUDAFStdSampleEvaluator.terminate(GenericUDAFEvaluator.AggregationBuffer agg)
           
 Object GenericUDAFVariance.GenericUDAFVarianceEvaluator.terminate(GenericUDAFEvaluator.AggregationBuffer agg)
           
 Object GenericUDAFCount.GenericUDAFCountEvaluator.terminate(GenericUDAFEvaluator.AggregationBuffer agg)
           
 Object GenericUDAFBridge.GenericUDAFBridgeEvaluator.terminate(GenericUDAFEvaluator.AggregationBuffer agg)
           
 Object GenericUDAFAverage.GenericUDAFAverageEvaluator.terminate(GenericUDAFEvaluator.AggregationBuffer agg)
           
abstract  Object GenericUDAFEvaluator.terminate(GenericUDAFEvaluator.AggregationBuffer agg)
          Get final aggregation result.
 Object GenericUDAFSum.GenericUDAFSumDouble.terminatePartial(GenericUDAFEvaluator.AggregationBuffer agg)
           
 Object GenericUDAFSum.GenericUDAFSumLong.terminatePartial(GenericUDAFEvaluator.AggregationBuffer agg)
           
 Object GenericUDAFVariance.GenericUDAFVarianceEvaluator.terminatePartial(GenericUDAFEvaluator.AggregationBuffer agg)
           
 Object GenericUDAFCount.GenericUDAFCountEvaluator.terminatePartial(GenericUDAFEvaluator.AggregationBuffer agg)
           
 Object GenericUDAFBridge.GenericUDAFBridgeEvaluator.terminatePartial(GenericUDAFEvaluator.AggregationBuffer agg)
           
 Object GenericUDAFAverage.GenericUDAFAverageEvaluator.terminatePartial(GenericUDAFEvaluator.AggregationBuffer agg)
           
abstract  Object GenericUDAFEvaluator.terminatePartial(GenericUDAFEvaluator.AggregationBuffer agg)
          Get partial aggregation result.
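
The evaluator methods above form the UDAF lifecycle: getNewAggregationBuffer() and reset() create and clear per-group state, iterate() consumes original rows, terminatePartial() and merge() exchange partial results between the map and reduce sides, and terminate() produces the final value; each of them can signal failure with HiveException. Below is a minimal, hypothetical count-of-non-nulls evaluator sketched against that lifecycle (the merge step is simplified; a real evaluator would interpret the partial result through the ObjectInspector received in init()):

    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
    import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
    import org.apache.hadoop.io.LongWritable;

    // Hypothetical evaluator, not a Hive built-in: counts non-null arguments.
    public class CountNonNullEvaluator extends GenericUDAFEvaluator {

      static class CountBuffer implements AggregationBuffer {
        long count;
      }

      public ObjectInspector init(Mode m, ObjectInspector[] parameters) throws HiveException {
        super.init(m, parameters);
        // Both the partial and the final result are a single long.
        return PrimitiveObjectInspectorFactory.writableLongObjectInspector;
      }

      public AggregationBuffer getNewAggregationBuffer() throws HiveException {
        return new CountBuffer();
      }

      public void reset(AggregationBuffer agg) throws HiveException {
        ((CountBuffer) agg).count = 0;
      }

      public void iterate(AggregationBuffer agg, Object[] parameters) throws HiveException {
        if (parameters[0] != null) {
          ((CountBuffer) agg).count++;
        }
      }

      public Object terminatePartial(AggregationBuffer agg) throws HiveException {
        return terminate(agg);
      }

      public void merge(AggregationBuffer agg, Object partial) throws HiveException {
        // Simplification: assumes the partial is the LongWritable produced by
        // terminatePartial() above; a real evaluator would interpret it through
        // the ObjectInspector passed to init() in PARTIAL2/FINAL mode.
        if (partial != null) {
          ((CountBuffer) agg).count += ((LongWritable) partial).get();
        }
      }

      public Object terminate(AggregationBuffer agg) throws HiveException {
        return new LongWritable(((CountBuffer) agg).count);
      }
    }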
 



Copyright © 2009 The Apache Software Foundation