|
||||||||||
PREV NEXT | FRAMES NO FRAMES |
Uses of VectorWritable in org.apache.mahout.cf.taste.hadoop |
---|
Methods in org.apache.mahout.cf.taste.hadoop with parameters of type VectorWritable | |
---|---|
protected void |
MaybePruneRowsMapper.map(VarLongWritable rowIndex,
VectorWritable vectorWritable,
org.apache.hadoop.mapreduce.Mapper.Context ctx)
|
Uses of VectorWritable in org.apache.mahout.cf.taste.hadoop.als |
---|
Methods in org.apache.mahout.cf.taste.hadoop.als with parameters of type VectorWritable | |
---|---|
protected void |
PredictionJob.FeaturesMapper.map(org.apache.hadoop.io.IntWritable id,
VectorWritable features,
org.apache.hadoop.mapreduce.Mapper.Context ctx)
|
Uses of VectorWritable in org.apache.mahout.cf.taste.hadoop.item |
---|
Methods in org.apache.mahout.cf.taste.hadoop.item with parameters of type VectorWritable | |
---|---|
protected void |
SimilarityMatrixRowWrapperMapper.map(org.apache.hadoop.io.IntWritable key,
VectorWritable value,
org.apache.hadoop.mapreduce.Mapper.Context context)
|
protected void |
UserVectorSplitterMapper.map(VarLongWritable key,
VectorWritable value,
org.apache.hadoop.mapreduce.Mapper.Context context)
|
Uses of VectorWritable in org.apache.mahout.cf.taste.hadoop.similarity.item |
---|
Methods in org.apache.mahout.cf.taste.hadoop.similarity.item with parameters of type VectorWritable | |
---|---|
protected void |
MostSimilarItemPairsMapper.map(org.apache.hadoop.io.IntWritable itemIDIndexWritable,
VectorWritable similarityVector,
org.apache.hadoop.mapreduce.Mapper.Context ctx)
|
protected void |
CountUsersMapper.map(VarLongWritable key,
VectorWritable value,
org.apache.hadoop.mapreduce.Mapper.Context context)
|
Uses of VectorWritable in org.apache.mahout.classifier.naivebayes.trainer |
---|
Methods in org.apache.mahout.classifier.naivebayes.trainer with parameters of type VectorWritable | |
---|---|
protected void |
NaiveBayesWeightsMapper.map(org.apache.hadoop.io.IntWritable key,
VectorWritable value,
org.apache.hadoop.mapreduce.Mapper.Context context)
|
protected void |
NaiveBayesThetaMapper.map(org.apache.hadoop.io.IntWritable key,
VectorWritable value,
org.apache.hadoop.mapreduce.Mapper.Context context)
|
protected void |
NaiveBayesThetaComplementaryMapper.map(org.apache.hadoop.io.IntWritable key,
VectorWritable value,
org.apache.hadoop.mapreduce.Mapper.Context context)
|
protected void |
NaiveBayesInstanceMapper.map(org.apache.hadoop.io.Text key,
VectorWritable value,
org.apache.hadoop.mapreduce.Mapper.Context context)
|
Method parameters in org.apache.mahout.classifier.naivebayes.trainer with type arguments of type VectorWritable | |
---|---|
protected void |
NaiveBayesSumReducer.reduce(org.apache.hadoop.io.WritableComparable<?> key,
Iterable<VectorWritable> values,
org.apache.hadoop.mapreduce.Reducer.Context context)
|
Uses of VectorWritable in org.apache.mahout.clustering |
---|
Methods in org.apache.mahout.clustering that return types with arguments of type VectorWritable | |
---|---|
Model<VectorWritable> |
Model.sampleFromPosterior()
|
Model<VectorWritable> |
DistanceMeasureCluster.sampleFromPosterior()
|
Methods in org.apache.mahout.clustering with parameters of type VectorWritable | |
---|---|
void |
AbstractCluster.observe(VectorWritable x)
|
void |
AbstractCluster.observe(VectorWritable x,
double weight)
|
double |
DistanceMeasureCluster.pdf(VectorWritable vw)
|
Uses of VectorWritable in org.apache.mahout.clustering.canopy |
---|
Methods in org.apache.mahout.clustering.canopy with parameters of type VectorWritable | |
---|---|
protected void |
ClusterMapper.map(org.apache.hadoop.io.WritableComparable<?> key,
VectorWritable point,
org.apache.hadoop.mapreduce.Mapper.Context context)
|
Method parameters in org.apache.mahout.clustering.canopy with type arguments of type VectorWritable | |
---|---|
protected void |
CanopyReducer.reduce(org.apache.hadoop.io.Text arg0,
Iterable<VectorWritable> values,
org.apache.hadoop.mapreduce.Reducer.Context context)
|
Uses of VectorWritable in org.apache.mahout.clustering.dirichlet |
---|
Methods in org.apache.mahout.clustering.dirichlet that return types with arguments of type VectorWritable | |
---|---|
ModelDistribution<VectorWritable> |
DirichletState.getModelFactory()
|
Model<VectorWritable> |
DirichletCluster.sampleFromPosterior()
|
Methods in org.apache.mahout.clustering.dirichlet with parameters of type VectorWritable | |
---|---|
double |
DirichletState.adjustedProbability(VectorWritable x,
int k)
Returns the adjusted probability that x is described by the kth model. |
protected int |
DirichletClusterer.assignToModel(VectorWritable observation)
Assigns the observation to one of the models based upon probabilities. |
void |
DirichletClusterer.emitPointToClusters(VectorWritable vector,
List<DirichletCluster> clusters,
org.apache.hadoop.mapreduce.Mapper.Context context)
Emits the point to one or more clusters depending upon the clusterer state. |
void |
DirichletClusterer.emitPointToClusters(VectorWritable vector,
List<DirichletCluster> clusters,
org.apache.hadoop.io.SequenceFile.Writer writer)
Emits the point to one or more clusters depending upon the clusterer state. |
protected void |
DirichletClusterMapper.map(org.apache.hadoop.io.WritableComparable<?> key,
VectorWritable vector,
org.apache.hadoop.mapreduce.Mapper.Context context)
|
protected void |
DirichletMapper.map(org.apache.hadoop.io.WritableComparable<?> key,
VectorWritable v,
org.apache.hadoop.mapreduce.Mapper.Context context)
|
protected void |
DirichletClusterer.observe(Model<VectorWritable>[] newModels,
VectorWritable observation)
|
void |
DirichletCluster.observe(VectorWritable x)
|
void |
DirichletCluster.observe(VectorWritable x,
double weight)
|
double |
DirichletCluster.pdf(VectorWritable x)
|
Method parameters in org.apache.mahout.clustering.dirichlet with type arguments of type VectorWritable | |
---|---|
static List<Cluster[]> |
DirichletClusterer.clusterPoints(List<VectorWritable> points,
ModelDistribution<VectorWritable> modelFactory,
double alpha0,
int numClusters,
int thin,
int burnin,
int numIterations)
Creates a new instance on the sample data with the given additional parameters. |
static List<Cluster[]> |
DirichletClusterer.clusterPoints(List<VectorWritable> points,
ModelDistribution<VectorWritable> modelFactory,
double alpha0,
int numClusters,
int thin,
int burnin,
int numIterations)
Creates a new instance on the sample data with the given additional parameters. |
protected void |
DirichletReducer.reduce(org.apache.hadoop.io.Text key,
Iterable<VectorWritable> values,
org.apache.hadoop.mapreduce.Reducer.Context context)
|
void |
DirichletState.setModelFactory(ModelDistribution<VectorWritable> modelFactory)
|
Constructor parameters in org.apache.mahout.clustering.dirichlet with type arguments of type VectorWritable | |
---|---|
DirichletClusterer(List<VectorWritable> sampleData,
ModelDistribution<VectorWritable> modelFactory,
double alpha0,
int numClusters,
int thin,
int burnin)
Creates a new instance on the sample data with the given additional parameters. |
|
DirichletClusterer(List<VectorWritable> sampleData,
ModelDistribution<VectorWritable> modelFactory,
double alpha0,
int numClusters,
int thin,
int burnin)
Creates a new instance on the sample data with the given additional parameters. |
|
DirichletState(ModelDistribution<VectorWritable> modelFactory,
int numClusters,
double alpha0)
|
Uses of VectorWritable in org.apache.mahout.clustering.dirichlet.models |
---|
Methods in org.apache.mahout.clustering.dirichlet.models that return VectorWritable | |
---|---|
VectorWritable |
AbstractVectorModelDistribution.getModelPrototype()
|
Methods in org.apache.mahout.clustering.dirichlet.models that return types with arguments of type VectorWritable | |
---|---|
ModelDistribution<VectorWritable> |
DistributionDescription.createModelDistribution()
Creates an instance of AbstractVectorModelDistribution from the given command-line arguments. |
Model<VectorWritable> |
GaussianCluster.sampleFromPosterior()
|
Methods in org.apache.mahout.clustering.dirichlet.models with parameters of type VectorWritable | |
---|---|
double |
GaussianCluster.pdf(VectorWritable vw)
|
void |
AbstractVectorModelDistribution.setModelPrototype(VectorWritable modelPrototype)
|
Constructors in org.apache.mahout.clustering.dirichlet.models with parameters of type VectorWritable | |
---|---|
AbstractVectorModelDistribution(VectorWritable modelPrototype)
|
|
DistanceMeasureClusterDistribution(VectorWritable modelPrototype)
|
|
DistanceMeasureClusterDistribution(VectorWritable modelPrototype,
DistanceMeasure measure)
|
|
GaussianClusterDistribution(VectorWritable modelPrototype)
|
Uses of VectorWritable in org.apache.mahout.clustering.fuzzykmeans |
---|
Methods in org.apache.mahout.clustering.fuzzykmeans with parameters of type VectorWritable | |
---|---|
void |
FuzzyKMeansClusterer.emitPointToClusters(VectorWritable point,
List<SoftCluster> clusters,
org.apache.hadoop.mapreduce.Mapper.Context context)
|
void |
FuzzyKMeansClusterer.emitPointToClusters(VectorWritable point,
List<SoftCluster> clusters,
org.apache.hadoop.io.SequenceFile.Writer writer)
|
protected void |
FuzzyKMeansClusterMapper.map(org.apache.hadoop.io.WritableComparable<?> key,
VectorWritable point,
org.apache.hadoop.mapreduce.Mapper.Context context)
|
protected void |
FuzzyKMeansMapper.map(org.apache.hadoop.io.WritableComparable<?> key,
VectorWritable point,
org.apache.hadoop.mapreduce.Mapper.Context context)
|
double |
SoftCluster.pdf(VectorWritable vw)
|
Uses of VectorWritable in org.apache.mahout.clustering.kmeans |
---|
Methods in org.apache.mahout.clustering.kmeans with parameters of type VectorWritable | |
---|---|
protected void |
KMeansClusterMapper.map(org.apache.hadoop.io.WritableComparable<?> key,
VectorWritable point,
org.apache.hadoop.mapreduce.Mapper.Context context)
|
protected void |
KMeansMapper.map(org.apache.hadoop.io.WritableComparable<?> key,
VectorWritable point,
org.apache.hadoop.mapreduce.Mapper.Context context)
|
Uses of VectorWritable in org.apache.mahout.clustering.lda |
---|
Methods in org.apache.mahout.clustering.lda with parameters of type VectorWritable | |
---|---|
protected void |
LDAWordTopicMapper.map(org.apache.hadoop.io.WritableComparable<?> key,
VectorWritable wordCountsWritable,
org.apache.hadoop.mapreduce.Mapper.Context context)
|
protected void |
LDADocumentTopicMapper.map(org.apache.hadoop.io.WritableComparable<?> key,
VectorWritable wordCountsWritable,
org.apache.hadoop.mapreduce.Mapper.Context context)
|
Uses of VectorWritable in org.apache.mahout.clustering.meanshift |
---|
Methods in org.apache.mahout.clustering.meanshift with parameters of type VectorWritable | |
---|---|
protected void |
MeanShiftCanopyCreatorMapper.map(org.apache.hadoop.io.WritableComparable<?> key,
VectorWritable point,
org.apache.hadoop.mapreduce.Mapper.Context context)
|
double |
MeanShiftCanopy.pdf(VectorWritable vw)
|
Uses of VectorWritable in org.apache.mahout.clustering.minhash |
---|
Methods in org.apache.mahout.clustering.minhash with parameters of type VectorWritable | |
---|---|
void |
MinHashMapper.map(org.apache.hadoop.io.Text item,
VectorWritable features,
org.apache.hadoop.mapreduce.Mapper.Context context)
Hashes all items with each hash function and retains the minimum. |
Uses of VectorWritable in org.apache.mahout.clustering.spectral.common |
---|
Methods in org.apache.mahout.clustering.spectral.common with parameters of type VectorWritable | |
---|---|
protected void |
VectorMatrixMultiplicationJob.VectorMatrixMultiplicationMapper.map(org.apache.hadoop.io.IntWritable key,
VectorWritable row,
org.apache.hadoop.mapreduce.Mapper.Context ctx)
|
protected void |
UnitVectorizerJob.UnitVectorizerMapper.map(org.apache.hadoop.io.IntWritable row,
VectorWritable vector,
org.apache.hadoop.mapreduce.Mapper.Context context)
|
protected void |
MatrixDiagonalizeJob.MatrixDiagonalizeMapper.map(org.apache.hadoop.io.IntWritable key,
VectorWritable row,
org.apache.hadoop.mapreduce.Mapper.Context context)
|
Uses of VectorWritable in org.apache.mahout.clustering.spectral.eigencuts |
---|
Methods in org.apache.mahout.clustering.spectral.eigencuts with parameters of type VectorWritable | |
---|---|
protected void |
EigencutsSensitivityMapper.map(org.apache.hadoop.io.IntWritable row,
VectorWritable vw,
org.apache.hadoop.mapreduce.Mapper.Context context)
|
protected void |
EigencutsAffinityCutsJob.EigencutsAffinityCutsMapper.map(org.apache.hadoop.io.IntWritable key,
VectorWritable row,
org.apache.hadoop.mapreduce.Mapper.Context context)
|
Uses of VectorWritable in org.apache.mahout.math.hadoop |
---|
Methods in org.apache.mahout.math.hadoop with parameters of type VectorWritable | |
---|---|
void |
TransposeJob.TransposeMapper.map(org.apache.hadoop.io.IntWritable r,
VectorWritable v,
org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.IntWritable,DistributedRowMatrix.MatrixEntryWritable> out,
org.apache.hadoop.mapred.Reporter reporter)
|
void |
TimesSquaredJob.TimesMapper.map(org.apache.hadoop.io.IntWritable rowNum,
VectorWritable v,
org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.NullWritable,VectorWritable> out,
org.apache.hadoop.mapred.Reporter rep)
|
void |
TimesSquaredJob.TimesSquaredMapper.map(T rowNum,
VectorWritable v,
org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.NullWritable,VectorWritable> out,
org.apache.hadoop.mapred.Reporter rep)
|
protected double |
TimesSquaredJob.TimesSquaredMapper.scale(VectorWritable v)
|
Method parameters in org.apache.mahout.math.hadoop with type arguments of type VectorWritable | |
---|---|
void |
MatrixMultiplicationJob.MatrixMultiplyMapper.map(org.apache.hadoop.io.IntWritable index,
org.apache.hadoop.mapred.join.TupleWritable v,
org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.IntWritable,VectorWritable> out,
org.apache.hadoop.mapred.Reporter reporter)
|
void |
TimesSquaredJob.TimesMapper.map(org.apache.hadoop.io.IntWritable rowNum,
VectorWritable v,
org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.NullWritable,VectorWritable> out,
org.apache.hadoop.mapred.Reporter rep)
|
void |
TimesSquaredJob.TimesSquaredMapper.map(T rowNum,
VectorWritable v,
org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.NullWritable,VectorWritable> out,
org.apache.hadoop.mapred.Reporter rep)
|
void |
TransposeJob.TransposeReducer.reduce(org.apache.hadoop.io.IntWritable outRow,
Iterator<DistributedRowMatrix.MatrixEntryWritable> it,
org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.IntWritable,VectorWritable> out,
org.apache.hadoop.mapred.Reporter reporter)
|
void |
MatrixMultiplicationJob.MatrixMultiplicationReducer.reduce(org.apache.hadoop.io.IntWritable rowNum,
Iterator<VectorWritable> it,
org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.IntWritable,VectorWritable> out,
org.apache.hadoop.mapred.Reporter reporter)
|
void |
MatrixMultiplicationJob.MatrixMultiplicationReducer.reduce(org.apache.hadoop.io.IntWritable rowNum,
Iterator<VectorWritable> it,
org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.IntWritable,VectorWritable> out,
org.apache.hadoop.mapred.Reporter reporter)
|
void |
TimesSquaredJob.VectorSummingReducer.reduce(org.apache.hadoop.io.NullWritable n,
Iterator<VectorWritable> vectors,
org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.NullWritable,VectorWritable> out,
org.apache.hadoop.mapred.Reporter reporter)
|
void |
TimesSquaredJob.VectorSummingReducer.reduce(org.apache.hadoop.io.NullWritable n,
Iterator<VectorWritable> vectors,
org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.NullWritable,VectorWritable> out,
org.apache.hadoop.mapred.Reporter reporter)
|
Uses of VectorWritable in org.apache.mahout.math.hadoop.similarity |
---|
Methods in org.apache.mahout.math.hadoop.similarity with parameters of type VectorWritable | |
---|---|
protected void |
RowSimilarityJob.RowWeightMapper.map(org.apache.hadoop.io.IntWritable row,
VectorWritable vectorWritable,
org.apache.hadoop.mapreduce.Mapper.Context ctx)
|
Uses of VectorWritable in org.apache.mahout.math.hadoop.stochasticsvd |
---|
Methods in org.apache.mahout.math.hadoop.stochasticsvd with parameters of type VectorWritable | |
---|---|
protected void |
VJob.VMapper.map(org.apache.hadoop.io.IntWritable key,
VectorWritable value,
org.apache.hadoop.mapreduce.Mapper.Context context)
|
protected void |
BBtJob.BBtMapper.map(org.apache.hadoop.io.IntWritable key,
VectorWritable value,
org.apache.hadoop.mapreduce.Mapper.Context context)
|
protected void |
BtJob.BtMapper.map(org.apache.hadoop.io.Writable key,
VectorWritable value,
org.apache.hadoop.mapreduce.Mapper.Context context)
|
protected void |
QJob.QMapper.map(org.apache.hadoop.io.Writable key,
VectorWritable value,
org.apache.hadoop.mapreduce.Mapper.Context context)
|
protected void |
UJob.UMapper.map(org.apache.hadoop.io.Writable key,
VectorWritable value,
org.apache.hadoop.mapreduce.Mapper.Context context)
|
Method parameters in org.apache.mahout.math.hadoop.stochasticsvd with type arguments of type VectorWritable | |
---|---|
protected void |
BtJob.OuterProductReducer.reduce(org.apache.hadoop.io.IntWritable key,
Iterable<VectorWritable> values,
org.apache.hadoop.mapreduce.Reducer.Context ctx)
|
protected void |
BBtJob.BBtReducer.reduce(org.apache.hadoop.io.IntWritable iw,
Iterable<VectorWritable> ivw,
org.apache.hadoop.mapreduce.Reducer.Context ctx)
|
Uses of VectorWritable in org.apache.mahout.vectorizer.common |
---|
Method parameters in org.apache.mahout.vectorizer.common with type arguments of type VectorWritable | |
---|---|
protected void |
PartialVectorMergeReducer.reduce(org.apache.hadoop.io.WritableComparable<?> key,
Iterable<VectorWritable> values,
org.apache.hadoop.mapreduce.Reducer.Context context)
|
Uses of VectorWritable in org.apache.mahout.vectorizer.term |
---|
Methods in org.apache.mahout.vectorizer.term with parameters of type VectorWritable | |
---|---|
protected void |
TermDocumentCountMapper.map(org.apache.hadoop.io.WritableComparable<?> key,
VectorWritable value,
org.apache.hadoop.mapreduce.Mapper.Context context)
|
Uses of VectorWritable in org.apache.mahout.vectorizer.tfidf |
---|
Method parameters in org.apache.mahout.vectorizer.tfidf with type arguments of type VectorWritable | |
---|---|
protected void |
TFIDFPartialVectorReducer.reduce(org.apache.hadoop.io.WritableComparable<?> key,
Iterable<VectorWritable> values,
org.apache.hadoop.mapreduce.Reducer.Context context)
|
|
||||||||||
PREV NEXT | FRAMES NO FRAMES |