Uses of Class org.apache.mahout.math.VectorWritable

Uses of VectorWritable in org.apache.mahout.cf.taste.hadoop

Methods in org.apache.mahout.cf.taste.hadoop with parameters of type VectorWritable:

| Modifier and Type | Method and Description |
|---|---|
| `protected void` | `MaybePruneRowsMapper.map(VarLongWritable rowIndex, VectorWritable vectorWritable, org.apache.hadoop.mapreduce.Mapper.Context ctx)` |
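
Every mapper listed on this page follows the same shape: the input value arrives as a `VectorWritable`, and the implementation unwraps the underlying `Vector` with `get()` before doing any math. A minimal sketch of that pattern; `VectorNormMapper` and its output types are hypothetical illustrations, not Mahout classes:

```java
import java.io.IOException;

import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.mahout.math.Vector;
import org.apache.mahout.math.VectorWritable;

// Hypothetical example class, not part of Mahout: emits the L2 norm of each row.
public class VectorNormMapper
    extends Mapper<IntWritable, VectorWritable, IntWritable, DoubleWritable> {

  @Override
  protected void map(IntWritable key, VectorWritable value, Context context)
      throws IOException, InterruptedException {
    Vector v = value.get(); // unwrap the Writable to reach the Vector API
    context.write(key, new DoubleWritable(v.norm(2)));
  }
}
```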

Uses of VectorWritable in org.apache.mahout.cf.taste.hadoop.item

Methods in org.apache.mahout.cf.taste.hadoop.item with parameters of type VectorWritable:

| Modifier and Type | Method and Description |
|---|---|
| `protected void` | `SimilarityMatrixRowWrapperMapper.map(org.apache.hadoop.io.IntWritable key, VectorWritable value, org.apache.hadoop.mapreduce.Mapper.Context context)` |
| `protected void` | `UserVectorSplitterMapper.map(VarLongWritable key, VectorWritable value, org.apache.hadoop.mapreduce.Mapper.Context context)` |

Uses of VectorWritable in org.apache.mahout.cf.taste.hadoop.similarity.item

Methods in org.apache.mahout.cf.taste.hadoop.similarity.item with parameters of type VectorWritable:

| Modifier and Type | Method and Description |
|---|---|
| `protected void` | `MostSimilarItemPairsMapper.map(org.apache.hadoop.io.IntWritable itemIDIndexWritable, VectorWritable similarityVector, org.apache.hadoop.mapreduce.Mapper.Context ctx)` |

Uses of VectorWritable in org.apache.mahout.classifier.naivebayes.trainer

Methods in org.apache.mahout.classifier.naivebayes.trainer with parameters of type VectorWritable:

| Modifier and Type | Method and Description |
|---|---|
| `protected void` | `NaiveBayesWeightsMapper.map(org.apache.hadoop.io.IntWritable key, VectorWritable value, org.apache.hadoop.mapreduce.Mapper.Context context)` |
| `protected void` | `NaiveBayesThetaMapper.map(org.apache.hadoop.io.IntWritable key, VectorWritable value, org.apache.hadoop.mapreduce.Mapper.Context context)` |
| `protected void` | `NaiveBayesThetaComplementaryMapper.map(org.apache.hadoop.io.IntWritable key, VectorWritable value, org.apache.hadoop.mapreduce.Mapper.Context context)` |
| `protected void` | `NaiveBayesInstanceMapper.map(org.apache.hadoop.io.Text key, VectorWritable value, org.apache.hadoop.mapreduce.Mapper.Context context)` |

Method parameters in org.apache.mahout.classifier.naivebayes.trainer with type arguments of type VectorWritable:

| Modifier and Type | Method and Description |
|---|---|
| `protected void` | `NaiveBayesSumReducer.reduce(org.apache.hadoop.io.WritableComparable<?> key, java.lang.Iterable<VectorWritable> values, org.apache.hadoop.mapreduce.Reducer.Context context)` |
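
On the reduce side the pattern inverts: the reducer receives an `Iterable<VectorWritable>` and folds the vectors together, as the `NaiveBayesSumReducer.reduce` signature above suggests. A minimal sketch of a summing reducer, assuming this hypothetical `VectorSumReducer` rather than Mahout's actual implementation:

```java
import java.io.IOException;

import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.mahout.math.Vector;
import org.apache.mahout.math.VectorWritable;

// Hypothetical example class, not part of Mahout: sums all vectors per key.
public class VectorSumReducer extends
    Reducer<WritableComparable<?>, VectorWritable, WritableComparable<?>, VectorWritable> {

  @Override
  protected void reduce(WritableComparable<?> key, Iterable<VectorWritable> values,
      Context context) throws IOException, InterruptedException {
    Vector sum = null;
    for (VectorWritable vw : values) {
      // Clone the first vector: Hadoop may reuse the value instance across
      // iterations, so we must not hold a reference into it.
      sum = (sum == null) ? vw.get().clone() : sum.plus(vw.get());
    }
    if (sum != null) {
      context.write(key, new VectorWritable(sum));
    }
  }
}
```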

Uses of VectorWritable in org.apache.mahout.clustering

Methods in org.apache.mahout.clustering that return types with arguments of type VectorWritable:

| Modifier and Type | Method and Description |
|---|---|
| `Model<VectorWritable>` | `Model.sampleFromPosterior()` |
| `Model<VectorWritable>` | `DistanceMeasureCluster.sampleFromPosterior()` |

Methods in org.apache.mahout.clustering with parameters of type VectorWritable:

| Modifier and Type | Method and Description |
|---|---|
| `void` | `AbstractCluster.observe(VectorWritable x)` |
| `double` | `DistanceMeasureCluster.pdf(VectorWritable vw)` |

Constructor parameters in org.apache.mahout.clustering with type arguments of type VectorWritable:

| Constructor and Description |
|---|
| `VectorModelClassifier(java.util.List<Model<VectorWritable>> models)` |
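
All of the `observe(VectorWritable)` and `pdf(VectorWritable)` methods above take the same thin wrapper around a Mahout `Vector`. A minimal sketch of wrapping and unwrapping; the data values are arbitrary, and the commented calls assume some `Model<VectorWritable>` instance named `cluster`:

```java
import org.apache.mahout.math.DenseVector;
import org.apache.mahout.math.Vector;
import org.apache.mahout.math.VectorWritable;

public class VectorWritableDemo {
  public static void main(String[] args) {
    // Wrap a plain Vector so it can cross Hadoop's Writable boundary
    // or be handed to Model<VectorWritable> methods such as observe/pdf.
    Vector point = new DenseVector(new double[] {1.0, 2.0, 3.0});
    VectorWritable vw = new VectorWritable(point);

    // With a Model<VectorWritable> instance (hypothetical variable `cluster`):
    // cluster.observe(vw);
    // double density = cluster.pdf(vw);

    Vector unwrapped = vw.get(); // recover the underlying Vector
    System.out.println(unwrapped.zSum()); // 6.0
  }
}
```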

Uses of VectorWritable in org.apache.mahout.clustering.canopy

Methods in org.apache.mahout.clustering.canopy with parameters of type VectorWritable:

| Modifier and Type | Method and Description |
|---|---|
| `protected void` | `ClusterMapper.map(org.apache.hadoop.io.WritableComparable<?> key, VectorWritable point, org.apache.hadoop.mapreduce.Mapper.Context context)` |

Method parameters in org.apache.mahout.clustering.canopy with type arguments of type VectorWritable:

| Modifier and Type | Method and Description |
|---|---|
| `protected void` | `CanopyReducer.reduce(org.apache.hadoop.io.Text arg0, java.lang.Iterable<VectorWritable> values, org.apache.hadoop.mapreduce.Reducer.Context context)` |

Uses of VectorWritable in org.apache.mahout.clustering.dirichlet

Methods in org.apache.mahout.clustering.dirichlet that return types with arguments of type VectorWritable:

| Modifier and Type | Method and Description |
|---|---|
| `ModelDistribution<VectorWritable>` | `DirichletState.getModelFactory()` |
| `Model<VectorWritable>` | `DirichletCluster.sampleFromPosterior()` |

Methods in org.apache.mahout.clustering.dirichlet with parameters of type VectorWritable:

| Modifier and Type | Method and Description |
|---|---|
| `double` | `DirichletState.adjustedProbability(VectorWritable x, int k)` Returns the adjusted probability that x is described by the kth model. |
| `protected int` | `DirichletClusterer.assignToModel(VectorWritable observation)` Assigns the observation to one of the models based upon probabilities. |
| `void` | `DirichletClusterer.emitPointToClusters(VectorWritable vector, java.util.List<DirichletCluster> clusters, org.apache.hadoop.mapreduce.Mapper.Context context)` Emits the point to one or more clusters, depending upon clusterer state. |
| `void` | `DirichletClusterer.emitPointToClusters(VectorWritable vector, java.util.List<DirichletCluster> clusters, org.apache.hadoop.io.SequenceFile.Writer writer)` Emits the point to one or more clusters, depending upon clusterer state. |
| `protected void` | `DirichletClusterMapper.map(org.apache.hadoop.io.WritableComparable<?> key, VectorWritable vector, org.apache.hadoop.mapreduce.Mapper.Context context)` |
| `protected void` | `DirichletMapper.map(org.apache.hadoop.io.WritableComparable<?> key, VectorWritable v, org.apache.hadoop.mapreduce.Mapper.Context context)` |
| `protected void` | `DirichletClusterer.observe(Model<VectorWritable>[] newModels, VectorWritable observation)` |
| `void` | `DirichletCluster.observe(VectorWritable x)` |
| `double` | `DirichletCluster.pdf(VectorWritable x)` |

Method parameters in org.apache.mahout.clustering.dirichlet with type arguments of type VectorWritable:

| Modifier and Type | Method and Description |
|---|---|
| `static org.apache.hadoop.fs.Path` | `DirichletDriver.buildClusters(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.fs.Path input, org.apache.hadoop.fs.Path output, ModelDistribution<VectorWritable> modelDistribution, int numClusters, int maxIterations, double alpha0, boolean runSequential)` Iterates over the input vectors to produce cluster directories for each iteration. |
| `static java.util.List<Cluster[]>` | `DirichletClusterer.clusterPoints(java.util.List<VectorWritable> points, ModelDistribution<VectorWritable> modelFactory, double alpha0, int numClusters, int thin, int burnin, int numIterations)` Creates a new instance on the sample data with the given additional parameters. |
| `protected static DirichletState` | `DirichletMapper.loadState(org.apache.hadoop.conf.Configuration conf, java.lang.String statePath, ModelDistribution<VectorWritable> modelDistribution, double alpha, int k)` |
| `protected void` | `DirichletReducer.reduce(org.apache.hadoop.io.Text key, java.lang.Iterable<VectorWritable> values, org.apache.hadoop.mapreduce.Reducer.Context context)` |
| `static void` | `DirichletDriver.run(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.fs.Path input, org.apache.hadoop.fs.Path output, ModelDistribution<VectorWritable> modelDistribution, int numModels, int maxIterations, double alpha0, boolean runClustering, boolean emitMostLikely, double threshold, boolean runSequential)` Iterates over the input vectors to produce clusters and, if requested, uses the results of the final iteration to cluster the input vectors. |
| `static void` | `DirichletDriver.run(org.apache.hadoop.fs.Path input, org.apache.hadoop.fs.Path output, ModelDistribution<VectorWritable> modelDistribution, int numClusters, int maxIterations, double alpha0, boolean runClustering, boolean emitMostLikely, double threshold, boolean runSequential)` Convenience method that supplies a default Configuration; iterates over the input vectors to produce clusters and, if requested, uses the results of the final iteration to cluster the input vectors. |
| `void` | `DirichletState.setModelFactory(ModelDistribution<VectorWritable> modelFactory)` |

Constructor parameters in org.apache.mahout.clustering.dirichlet with type arguments of type VectorWritable:

| Constructor and Description |
|---|
| `DirichletClusterer(java.util.List<VectorWritable> sampleData, ModelDistribution<VectorWritable> modelFactory, double alpha0, int numClusters, int thin, int burnin)` Creates a new instance on the sample data with the given additional parameters. |
| `DirichletState(ModelDistribution<VectorWritable> modelFactory, int numClusters, double alpha0)` |
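
Most entries above are Hadoop plumbing, but `DirichletClusterer.clusterPoints(...)` can be driven entirely in memory. A minimal sketch built only from the signatures listed on this page, using `NormalModelDistribution` from the next section as the model factory; the sample points and parameter values are arbitrary illustrations, not recommended settings:

```java
import java.util.ArrayList;
import java.util.List;

import org.apache.mahout.clustering.Cluster;
import org.apache.mahout.clustering.dirichlet.DirichletClusterer;
import org.apache.mahout.clustering.dirichlet.models.NormalModelDistribution;
import org.apache.mahout.math.DenseVector;
import org.apache.mahout.math.VectorWritable;

public class DirichletDemo {
  public static void main(String[] args) {
    // Arbitrary two-dimensional sample points, wrapped as VectorWritable.
    List<VectorWritable> points = new ArrayList<VectorWritable>();
    points.add(new VectorWritable(new DenseVector(new double[] {1.0, 1.0})));
    points.add(new VectorWritable(new DenseVector(new double[] {1.1, 0.9})));
    points.add(new VectorWritable(new DenseVector(new double[] {8.0, 8.0})));

    // The model prototype tells the distribution what cardinality to sample.
    NormalModelDistribution dist =
        new NormalModelDistribution(new VectorWritable(new DenseVector(2)));

    // alpha0 = 1.0, 10 models, thin = 2, burnin = 2, 20 iterations:
    // illustrative values only.
    List<Cluster[]> iterations =
        DirichletClusterer.clusterPoints(points, dist, 1.0, 10, 2, 2, 20);

    Cluster[] last = iterations.get(iterations.size() - 1);
    System.out.println("models in final iteration: " + last.length);
  }
}
```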

Uses of VectorWritable in org.apache.mahout.clustering.dirichlet.models

Methods in org.apache.mahout.clustering.dirichlet.models that return VectorWritable:

| Modifier and Type | Method and Description |
|---|---|
| `VectorWritable` | `AbstractVectorModelDistribution.getModelPrototype()` |

Methods in org.apache.mahout.clustering.dirichlet.models that return types with arguments of type VectorWritable:

| Modifier and Type | Method and Description |
|---|---|
| `Model<VectorWritable>` | `GaussianCluster.sampleFromPosterior()` |

Methods in org.apache.mahout.clustering.dirichlet.models with parameters of type VectorWritable:

| Modifier and Type | Method and Description |
|---|---|
| `void` | `NormalModel.observe(VectorWritable x)` |
| `void` | `L1Model.observe(VectorWritable x)` |
| `void` | `AsymmetricSampledNormalModel.observe(VectorWritable v)` |
| `double` | `NormalModel.pdf(VectorWritable v)` |
| `double` | `L1Model.pdf(VectorWritable x)` |
| `double` | `GaussianCluster.pdf(VectorWritable vw)` |
| `double` | `AsymmetricSampledNormalModel.pdf(VectorWritable v)` |
| `void` | `AbstractVectorModelDistribution.setModelPrototype(VectorWritable modelPrototype)` |

Constructors in org.apache.mahout.clustering.dirichlet.models with parameters of type VectorWritable:

| Constructor and Description |
|---|
| `AbstractVectorModelDistribution(VectorWritable modelPrototype)` |
| `AsymmetricSampledNormalDistribution(VectorWritable modelPrototype)` |
| `DistanceMeasureClusterDistribution(VectorWritable modelPrototype)` |
| `DistanceMeasureClusterDistribution(VectorWritable modelPrototype, DistanceMeasure measure)` |
| `GaussianClusterDistribution(VectorWritable modelPrototype)` |
| `L1ModelDistribution(VectorWritable modelPrototype)` |
| `NormalModelDistribution(VectorWritable modelPrototype)` |
| `SampledNormalDistribution(VectorWritable modelPrototype)` |

Uses of VectorWritable in org.apache.mahout.clustering.fuzzykmeans

Methods in org.apache.mahout.clustering.fuzzykmeans with parameters of type VectorWritable:

| Modifier and Type | Method and Description |
|---|---|
| `void` | `FuzzyKMeansClusterer.emitPointToClusters(VectorWritable point, java.util.List<SoftCluster> clusters, org.apache.hadoop.mapreduce.Mapper.Context context)` |
| `void` | `FuzzyKMeansClusterer.emitPointToClusters(VectorWritable point, java.util.List<SoftCluster> clusters, org.apache.hadoop.io.SequenceFile.Writer writer)` |
| `protected void` | `FuzzyKMeansClusterMapper.map(org.apache.hadoop.io.WritableComparable<?> key, VectorWritable point, org.apache.hadoop.mapreduce.Mapper.Context context)` |
| `protected void` | `FuzzyKMeansMapper.map(org.apache.hadoop.io.WritableComparable<?> key, VectorWritable point, org.apache.hadoop.mapreduce.Mapper.Context context)` |
| `double` | `SoftCluster.pdf(VectorWritable vw)` |

Uses of VectorWritable in org.apache.mahout.clustering.kmeans

Methods in org.apache.mahout.clustering.kmeans with parameters of type VectorWritable:

| Modifier and Type | Method and Description |
|---|---|
| `protected void` | `KMeansClusterMapper.map(org.apache.hadoop.io.WritableComparable<?> key, VectorWritable point, org.apache.hadoop.mapreduce.Mapper.Context context)` |
| `protected void` | `KMeansMapper.map(org.apache.hadoop.io.WritableComparable<?> key, VectorWritable point, org.apache.hadoop.mapreduce.Mapper.Context context)` |

Uses of VectorWritable in org.apache.mahout.clustering.lda

Methods in org.apache.mahout.clustering.lda with parameters of type VectorWritable:

| Modifier and Type | Method and Description |
|---|---|
| `protected void` | `LDAMapper.map(org.apache.hadoop.io.WritableComparable<?> key, VectorWritable wordCountsWritable, org.apache.hadoop.mapreduce.Mapper.Context context)` |

Uses of VectorWritable in org.apache.mahout.clustering.meanshift

Methods in org.apache.mahout.clustering.meanshift with parameters of type VectorWritable:

| Modifier and Type | Method and Description |
|---|---|
| `protected void` | `MeanShiftCanopyCreatorMapper.map(org.apache.hadoop.io.WritableComparable<?> key, VectorWritable point, org.apache.hadoop.mapreduce.Mapper.Context context)` |
| `double` | `MeanShiftCanopy.pdf(VectorWritable vw)` |

Uses of VectorWritable in org.apache.mahout.clustering.spectral.common

Methods in org.apache.mahout.clustering.spectral.common with parameters of type VectorWritable:

| Modifier and Type | Method and Description |
|---|---|
| `protected void` | `VectorMatrixMultiplicationJob.VectorMatrixMultiplicationMapper.map(org.apache.hadoop.io.IntWritable key, VectorWritable row, org.apache.hadoop.mapreduce.Mapper.Context ctx)` |
| `protected void` | `UnitVectorizerJob.UnitVectorizerMapper.map(org.apache.hadoop.io.IntWritable row, VectorWritable vector, org.apache.hadoop.mapreduce.Mapper.Context context)` |
| `protected void` | `MatrixDiagonalizeJob.MatrixDiagonalizeMapper.map(org.apache.hadoop.io.IntWritable key, VectorWritable row, org.apache.hadoop.mapreduce.Mapper.Context context)` |

Uses of VectorWritable in org.apache.mahout.clustering.spectral.eigencuts

Methods in org.apache.mahout.clustering.spectral.eigencuts with parameters of type VectorWritable:

| Modifier and Type | Method and Description |
|---|---|
| `protected void` | `EigencutsSensitivityMapper.map(org.apache.hadoop.io.IntWritable row, VectorWritable vw, org.apache.hadoop.mapreduce.Mapper.Context context)` |
| `protected void` | `EigencutsAffinityCutsJob.EigencutsAffinityCutsMapper.map(org.apache.hadoop.io.IntWritable key, VectorWritable row, org.apache.hadoop.mapreduce.Mapper.Context context)` |

Uses of VectorWritable in org.apache.mahout.math.hadoop

Methods in org.apache.mahout.math.hadoop with parameters of type VectorWritable:

| Modifier and Type | Method and Description |
|---|---|
| `void` | `TransposeJob.TransposeMapper.map(org.apache.hadoop.io.IntWritable r, VectorWritable v, org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.IntWritable,DistributedRowMatrix.MatrixEntryWritable> out, org.apache.hadoop.mapred.Reporter reporter)` |
| `void` | `TimesSquaredJob.TimesMapper.map(org.apache.hadoop.io.IntWritable rowNum, VectorWritable v, org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.NullWritable,VectorWritable> out, org.apache.hadoop.mapred.Reporter rep)` |
| `void` | `TimesSquaredJob.TimesSquaredMapper.map(T rowNum, VectorWritable v, org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.NullWritable,VectorWritable> out, org.apache.hadoop.mapred.Reporter rep)` |
| `protected double` | `TimesSquaredJob.TimesSquaredMapper.scale(VectorWritable v)` |

Method parameters in org.apache.mahout.math.hadoop with type arguments of type VectorWritable:

| Modifier and Type | Method and Description |
|---|---|
| `void` | `MatrixMultiplicationJob.MatrixMultiplyMapper.map(org.apache.hadoop.io.IntWritable index, org.apache.hadoop.mapred.join.TupleWritable v, org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.IntWritable,VectorWritable> out, org.apache.hadoop.mapred.Reporter reporter)` |
| `void` | `TimesSquaredJob.TimesMapper.map(org.apache.hadoop.io.IntWritable rowNum, VectorWritable v, org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.NullWritable,VectorWritable> out, org.apache.hadoop.mapred.Reporter rep)` |
| `void` | `TimesSquaredJob.TimesSquaredMapper.map(T rowNum, VectorWritable v, org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.NullWritable,VectorWritable> out, org.apache.hadoop.mapred.Reporter rep)` |
| `void` | `TransposeJob.TransposeReducer.reduce(org.apache.hadoop.io.IntWritable outRow, java.util.Iterator<DistributedRowMatrix.MatrixEntryWritable> it, org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.IntWritable,VectorWritable> out, org.apache.hadoop.mapred.Reporter reporter)` |
| `void` | `MatrixMultiplicationJob.MatrixMultiplicationReducer.reduce(org.apache.hadoop.io.IntWritable rowNum, java.util.Iterator<VectorWritable> it, org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.IntWritable,VectorWritable> out, org.apache.hadoop.mapred.Reporter reporter)` |
| `void` | `TimesSquaredJob.VectorSummingReducer.reduce(org.apache.hadoop.io.NullWritable n, java.util.Iterator<VectorWritable> vectors, org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.NullWritable,VectorWritable> out, org.apache.hadoop.mapred.Reporter reporter)` |
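
These jobs exchange matrices as Hadoop sequence files whose entries pair a row index with a `VectorWritable` row. A minimal sketch of writing such an input file with the stock Hadoop API of this era; the path is a hypothetical placeholder:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.mahout.math.DenseVector;
import org.apache.mahout.math.VectorWritable;

public class WriteMatrixRows {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("matrix/rows.seq"); // hypothetical location

    // One <IntWritable, VectorWritable> pair per matrix row.
    SequenceFile.Writer writer =
        SequenceFile.createWriter(fs, conf, path, IntWritable.class, VectorWritable.class);
    try {
      for (int row = 0; row < 3; row++) {
        DenseVector v = new DenseVector(new double[] {row, row + 1.0, row + 2.0});
        writer.append(new IntWritable(row), new VectorWritable(v));
      }
    } finally {
      writer.close();
    }
  }
}
```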

Uses of VectorWritable in org.apache.mahout.math.hadoop.similarity

Methods in org.apache.mahout.math.hadoop.similarity with parameters of type VectorWritable:

| Modifier and Type | Method and Description |
|---|---|
| `protected void` | `RowSimilarityJob.RowWeightMapper.map(org.apache.hadoop.io.IntWritable row, VectorWritable vectorWritable, org.apache.hadoop.mapreduce.Mapper.Context ctx)` |

Uses of VectorWritable in org.apache.mahout.vectorizer.common

Method parameters in org.apache.mahout.vectorizer.common with type arguments of type VectorWritable:

| Modifier and Type | Method and Description |
|---|---|
| `protected void` | `PartialVectorMergeReducer.reduce(org.apache.hadoop.io.WritableComparable<?> key, java.lang.Iterable<VectorWritable> values, org.apache.hadoop.mapreduce.Reducer.Context context)` |

Uses of VectorWritable in org.apache.mahout.vectorizer.term

Methods in org.apache.mahout.vectorizer.term with parameters of type VectorWritable:

| Modifier and Type | Method and Description |
|---|---|
| `protected void` | `TermDocumentCountMapper.map(org.apache.hadoop.io.WritableComparable<?> key, VectorWritable value, org.apache.hadoop.mapreduce.Mapper.Context context)` |

Uses of VectorWritable in org.apache.mahout.vectorizer.tfidf

Method parameters in org.apache.mahout.vectorizer.tfidf with type arguments of type VectorWritable:

| Modifier and Type | Method and Description |
|---|---|
| `protected void` | `TFIDFPartialVectorReducer.reduce(org.apache.hadoop.io.WritableComparable<?> key, java.lang.Iterable<VectorWritable> values, org.apache.hadoop.mapreduce.Reducer.Context context)` |
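
Reading the `VectorWritable` output of any of these jobs is the mirror image of writing it. A minimal sketch, again with a hypothetical path, and assuming `IntWritable` keys (the actual key type varies by job, e.g. `Text` for the tf-idf reducers):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.mahout.math.VectorWritable;

public class ReadVectors {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("output/part-r-00000"); // hypothetical location

    SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf);
    try {
      IntWritable key = new IntWritable();
      VectorWritable value = new VectorWritable();
      // next(key, value) deserializes each pair into the reused instances.
      while (reader.next(key, value)) {
        System.out.println(key.get() + " => " + value.get());
      }
    } finally {
      reader.close();
    }
  }
}
```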