Uses of Class org.apache.mahout.math.VectorWritable
Uses of VectorWritable in org.apache.mahout.cf.taste.hadoop.item

Methods in org.apache.mahout.cf.taste.hadoop.item with parameters of type VectorWritable:

  protected void SimilarityMatrixRowWrapperMapper.map(org.apache.hadoop.io.IntWritable key,
      VectorWritable value, org.apache.hadoop.mapreduce.Mapper.Context context)

  protected void UserVectorSplitterMapper.map(VarLongWritable key, VectorWritable value,
      org.apache.hadoop.mapreduce.Mapper.Context context)
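Most entries on this page share the same shape: a Mapper or Reducer whose input value is a VectorWritable wrapping a Mahout Vector. A minimal sketch of that pattern follows; the ScaleVectorMapper class and the factor of 2.0 are illustrative, not part of Mahout:

    import java.io.IOException;

    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.mapreduce.Mapper;
    import org.apache.mahout.math.Vector;
    import org.apache.mahout.math.VectorWritable;

    // Hypothetical mapper showing the (key, VectorWritable) map() pattern
    // shared by the methods listed on this page.
    public class ScaleVectorMapper
        extends Mapper<IntWritable, VectorWritable, IntWritable, VectorWritable> {

      @Override
      protected void map(IntWritable key, VectorWritable value, Context context)
          throws IOException, InterruptedException {
        Vector v = value.get();  // unwrap the Mahout Vector
        context.write(key, new VectorWritable(v.times(2.0)));  // any per-row transform
      }
    }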
Uses of VectorWritable in org.apache.mahout.cf.taste.hadoop.preparation

Methods in org.apache.mahout.cf.taste.hadoop.preparation with parameters of type VectorWritable:

  protected void ToItemVectorsMapper.map(VarLongWritable rowIndex, VectorWritable vectorWritable,
      org.apache.hadoop.mapreduce.Mapper.Context ctx)

Method parameters in org.apache.mahout.cf.taste.hadoop.preparation with type arguments of type VectorWritable:

  protected void ToItemVectorsReducer.reduce(org.apache.hadoop.io.IntWritable row,
      Iterable<VectorWritable> vectors, org.apache.hadoop.mapreduce.Reducer.Context ctx)
Uses of VectorWritable in org.apache.mahout.cf.taste.hadoop.similarity.item

Methods in org.apache.mahout.cf.taste.hadoop.similarity.item with parameters of type VectorWritable:

  protected void ItemSimilarityJob.MostSimilarItemPairsMapper.map(
      org.apache.hadoop.io.IntWritable itemIDIndexWritable, VectorWritable similarityVector,
      org.apache.hadoop.mapreduce.Mapper.Context ctx)
Uses of VectorWritable in org.apache.mahout.classifier.naivebayes.test

Methods in org.apache.mahout.classifier.naivebayes.test with parameters of type VectorWritable:

  protected void BayesTestMapper.map(org.apache.hadoop.io.Text key, VectorWritable value,
      org.apache.hadoop.mapreduce.Mapper.Context context)
Uses of VectorWritable in org.apache.mahout.classifier.naivebayes.training

Methods in org.apache.mahout.classifier.naivebayes.training with parameters of type VectorWritable:

  protected void WeightsMapper.map(org.apache.hadoop.io.IntWritable index, VectorWritable value,
      org.apache.hadoop.mapreduce.Mapper.Context ctx)

  protected void ThetaMapper.map(org.apache.hadoop.io.IntWritable key, VectorWritable value,
      org.apache.hadoop.mapreduce.Mapper.Context ctx)

  protected void IndexInstancesMapper.map(org.apache.hadoop.io.Text labelText,
      VectorWritable instance, org.apache.hadoop.mapreduce.Mapper.Context ctx)
Uses of VectorWritable in org.apache.mahout.clustering

Methods in org.apache.mahout.clustering that return types with arguments of type VectorWritable:

  Model<VectorWritable> Model.sampleFromPosterior()

  Model<VectorWritable> DistanceMeasureCluster.sampleFromPosterior()

Methods in org.apache.mahout.clustering with parameters of type VectorWritable:

  protected void CIMapper.map(org.apache.hadoop.io.WritableComparable<?> key, VectorWritable value,
      org.apache.hadoop.mapreduce.Mapper.Context context)

  void AbstractCluster.observe(VectorWritable x)

  void AbstractCluster.observe(VectorWritable x, double weight)

  double DistanceMeasureCluster.pdf(VectorWritable vw)

Method parameters in org.apache.mahout.clustering with type arguments of type VectorWritable:

  void AbstractCluster.observe(Model<VectorWritable> x)
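A short sketch of the observe/pdf pair above. The (center, id, measure) constructor for DistanceMeasureCluster and the sample values are assumptions for illustration:

    import org.apache.mahout.clustering.DistanceMeasureCluster;
    import org.apache.mahout.common.distance.EuclideanDistanceMeasure;
    import org.apache.mahout.math.DenseVector;
    import org.apache.mahout.math.VectorWritable;

    public class ObservePdfExample {
      public static void main(String[] args) {
        // Assumed constructor: (center, clusterId, distance measure).
        DistanceMeasureCluster cluster = new DistanceMeasureCluster(
            new DenseVector(new double[] {1.0, 2.0}), 0, new EuclideanDistanceMeasure());

        VectorWritable x = new VectorWritable(new DenseVector(new double[] {1.5, 2.5}));
        cluster.observe(x);         // accumulate the point into the cluster's statistics
        cluster.observe(x, 0.5);    // or accumulate it with an explicit weight
        double p = cluster.pdf(x);  // distance-based likelihood that x belongs here
        System.out.println(p);
      }
    }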
Uses of VectorWritable in org.apache.mahout.clustering.canopy

Methods in org.apache.mahout.clustering.canopy with parameters of type VectorWritable:

  protected void ClusterMapper.map(org.apache.hadoop.io.WritableComparable<?> key,
      VectorWritable point, org.apache.hadoop.mapreduce.Mapper.Context context)

Method parameters in org.apache.mahout.clustering.canopy with type arguments of type VectorWritable:

  protected void CanopyReducer.reduce(org.apache.hadoop.io.Text arg0,
      Iterable<VectorWritable> values, org.apache.hadoop.mapreduce.Reducer.Context context)
Uses of VectorWritable in org.apache.mahout.clustering.dirichlet

Methods in org.apache.mahout.clustering.dirichlet that return types with arguments of type VectorWritable:

  ModelDistribution<VectorWritable> DirichletState.getModelFactory()

  Model<VectorWritable> DirichletCluster.sampleFromPosterior()

Methods in org.apache.mahout.clustering.dirichlet with parameters of type VectorWritable:

  double DirichletState.adjustedProbability(VectorWritable x, int k)
      Returns the adjusted probability that x is described by the k-th model.

  protected int DirichletClusterer.assignToModel(VectorWritable observation)
      Assigns the observation to one of the models based upon probabilities.

  void DirichletClusterer.emitPointToClusters(VectorWritable vector, List<DirichletCluster> clusters,
      org.apache.hadoop.mapreduce.Mapper.Context context)
      Emits the point to one or more clusters depending upon clusterer state.

  void DirichletClusterer.emitPointToClusters(VectorWritable vector, List<DirichletCluster> clusters,
      org.apache.hadoop.io.SequenceFile.Writer writer)
      Emits the point to one or more clusters depending upon clusterer state.

  protected void DirichletClusterMapper.map(org.apache.hadoop.io.WritableComparable<?> key,
      VectorWritable vector, org.apache.hadoop.mapreduce.Mapper.Context context)

  protected void DirichletMapper.map(org.apache.hadoop.io.WritableComparable<?> key,
      VectorWritable v, org.apache.hadoop.mapreduce.Mapper.Context context)

  protected void DirichletClusterer.observe(Model<VectorWritable>[] newModels,
      VectorWritable observation)

  void DirichletCluster.observe(VectorWritable x)

  void DirichletCluster.observe(VectorWritable x, double weight)

  double DirichletCluster.pdf(VectorWritable x)

Method parameters in org.apache.mahout.clustering.dirichlet with type arguments of type VectorWritable:

  static List<Cluster[]> DirichletClusterer.clusterPoints(List<VectorWritable> points,
      ModelDistribution<VectorWritable> modelFactory, double alpha0, int numClusters,
      int thin, int burnin, int numIterations)
      Creates a new instance on the sample data with the given additional parameters.

  void DirichletCluster.observe(Model<VectorWritable> x)

  protected void DirichletReducer.reduce(org.apache.hadoop.io.Text key,
      Iterable<VectorWritable> values, org.apache.hadoop.mapreduce.Reducer.Context context)

  void DirichletState.setModelFactory(ModelDistribution<VectorWritable> modelFactory)

Constructor parameters in org.apache.mahout.clustering.dirichlet with type arguments of type VectorWritable:

  DirichletClusterer(List<VectorWritable> sampleData, ModelDistribution<VectorWritable> modelFactory,
      double alpha0, int numClusters, int thin, int burnin)
      Creates a new instance on the sample data with the given additional parameters.

  DirichletState(ModelDistribution<VectorWritable> modelFactory, int numClusters, double alpha0)
Uses of VectorWritable in org.apache.mahout.clustering.dirichlet.models

Methods in org.apache.mahout.clustering.dirichlet.models that return VectorWritable:

  VectorWritable AbstractVectorModelDistribution.getModelPrototype()

Methods in org.apache.mahout.clustering.dirichlet.models that return types with arguments of type VectorWritable:

  ModelDistribution<VectorWritable> DistributionDescription.createModelDistribution()
      Creates an instance of AbstractVectorModelDistribution from the given command line arguments.

  Model<VectorWritable> GaussianCluster.sampleFromPosterior()

Methods in org.apache.mahout.clustering.dirichlet.models with parameters of type VectorWritable:

  double GaussianCluster.pdf(VectorWritable vw)

  void AbstractVectorModelDistribution.setModelPrototype(VectorWritable modelPrototype)

Constructors in org.apache.mahout.clustering.dirichlet.models with parameters of type VectorWritable:

  AbstractVectorModelDistribution(VectorWritable modelPrototype)

  DistanceMeasureClusterDistribution(VectorWritable modelPrototype)

  DistanceMeasureClusterDistribution(VectorWritable modelPrototype, DistanceMeasure measure)

  GaussianClusterDistribution(VectorWritable modelPrototype)
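Putting the two packages together, here is a minimal in-memory sketch of driving DirichletClusterer.clusterPoints (previous section) with the GaussianClusterDistribution above; the data and all parameter values are illustrative only:

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.mahout.clustering.Cluster;
    import org.apache.mahout.clustering.dirichlet.DirichletClusterer;
    import org.apache.mahout.clustering.dirichlet.models.GaussianClusterDistribution;
    import org.apache.mahout.math.DenseVector;
    import org.apache.mahout.math.VectorWritable;

    public class DirichletExample {
      public static void main(String[] args) {
        List<VectorWritable> points = new ArrayList<VectorWritable>();
        points.add(new VectorWritable(new DenseVector(new double[] {1.0, 1.0})));
        points.add(new VectorWritable(new DenseVector(new double[] {9.0, 9.0})));

        // The prototype tells the distribution the cardinality and type of the vectors.
        GaussianClusterDistribution modelFactory = new GaussianClusterDistribution(
            new VectorWritable(new DenseVector(new double[] {0.0, 0.0})));

        // Illustrative parameters: alpha0 = 1.0, 10 models, thin = 2, burnin = 5,
        // 30 iterations; each retained iteration contributes one Cluster[] snapshot.
        List<Cluster[]> snapshots =
            DirichletClusterer.clusterPoints(points, modelFactory, 1.0, 10, 2, 5, 30);
        System.out.println(snapshots.size() + " snapshots");
      }
    }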
Uses of VectorWritable in org.apache.mahout.clustering.fuzzykmeans

Methods in org.apache.mahout.clustering.fuzzykmeans with parameters of type VectorWritable:

  void FuzzyKMeansClusterer.emitPointToClusters(VectorWritable point, List<SoftCluster> clusters,
      org.apache.hadoop.mapreduce.Mapper.Context context)

  void FuzzyKMeansClusterer.emitPointToClusters(VectorWritable point, List<SoftCluster> clusters,
      org.apache.hadoop.io.SequenceFile.Writer writer)

  protected void FuzzyKMeansClusterMapper.map(org.apache.hadoop.io.WritableComparable<?> key,
      VectorWritable point, org.apache.hadoop.mapreduce.Mapper.Context context)

  protected void FuzzyKMeansMapper.map(org.apache.hadoop.io.WritableComparable<?> key,
      VectorWritable point, org.apache.hadoop.mapreduce.Mapper.Context context)

  double SoftCluster.pdf(VectorWritable vw)
Uses of VectorWritable in org.apache.mahout.clustering.kmeans

Methods in org.apache.mahout.clustering.kmeans with parameters of type VectorWritable:

  protected void KMeansClusterMapper.map(org.apache.hadoop.io.WritableComparable<?> key,
      VectorWritable point, org.apache.hadoop.mapreduce.Mapper.Context context)

  protected void KMeansMapper.map(org.apache.hadoop.io.WritableComparable<?> key,
      VectorWritable point, org.apache.hadoop.mapreduce.Mapper.Context context)
Uses of VectorWritable in org.apache.mahout.clustering.lda

Methods in org.apache.mahout.clustering.lda with parameters of type VectorWritable:

  protected void LDAWordTopicMapper.map(org.apache.hadoop.io.WritableComparable<?> key,
      VectorWritable wordCountsWritable, org.apache.hadoop.mapreduce.Mapper.Context context)

  protected void LDADocumentTopicMapper.map(org.apache.hadoop.io.WritableComparable<?> key,
      VectorWritable wordCountsWritable, org.apache.hadoop.mapreduce.Mapper.Context context)
Uses of VectorWritable in org.apache.mahout.clustering.lda.cvb

Methods in org.apache.mahout.clustering.lda.cvb with parameters of type VectorWritable:

  void CachingCVB0PerplexityMapper.map(org.apache.hadoop.io.IntWritable docId,
      VectorWritable document, org.apache.hadoop.mapreduce.Mapper.Context context)

  protected void CVB0TopicTermVectorNormalizerMapper.map(org.apache.hadoop.io.IntWritable key,
      VectorWritable value, org.apache.hadoop.mapreduce.Mapper.Context context)

  void CVB0DocInferenceMapper.map(org.apache.hadoop.io.IntWritable docId, VectorWritable doc,
      org.apache.hadoop.mapreduce.Mapper.Context context)

  void CachingCVB0Mapper.map(org.apache.hadoop.io.IntWritable docId, VectorWritable document,
      org.apache.hadoop.mapreduce.Mapper.Context context)
Uses of VectorWritable in org.apache.mahout.clustering.meanshift

Methods in org.apache.mahout.clustering.meanshift with parameters of type VectorWritable:

  protected void MeanShiftCanopyCreatorMapper.map(org.apache.hadoop.io.WritableComparable<?> key,
      VectorWritable point, org.apache.hadoop.mapreduce.Mapper.Context context)

  double MeanShiftCanopy.pdf(VectorWritable vw)
Uses of VectorWritable in org.apache.mahout.clustering.minhash

Methods in org.apache.mahout.clustering.minhash with parameters of type VectorWritable:

  void MinHashMapper.map(org.apache.hadoop.io.Text item, VectorWritable features,
      org.apache.hadoop.mapreduce.Mapper.Context context)
      Hashes all items with each function and retains the min.
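The one-line summary above is the core of min-hashing. Here is a standalone sketch of that idea over a vector's non-zero feature indices; the simple linear hash is illustrative and not Mahout's HashFunction implementation:

    import java.util.Arrays;
    import java.util.Iterator;
    import java.util.Random;

    import org.apache.mahout.math.RandomAccessSparseVector;
    import org.apache.mahout.math.Vector;

    public class MinHashSketch {
      public static void main(String[] args) {
        Vector features = new RandomAccessSparseVector(100);
        features.set(3, 1.0);
        features.set(42, 1.0);

        int numHashFunctions = 10;
        Random rnd = new Random(11);
        int[] minHashes = new int[numHashFunctions];
        int[] a = new int[numHashFunctions];
        int[] b = new int[numHashFunctions];
        for (int i = 0; i < numHashFunctions; i++) {
          a[i] = rnd.nextInt() | 1;  // odd multiplier for a simple linear hash
          b[i] = rnd.nextInt();
          minHashes[i] = Integer.MAX_VALUE;
        }

        // For every feature index present, hash with each function and keep the minimum.
        Iterator<Vector.Element> it = features.iterateNonZero();
        while (it.hasNext()) {
          int index = it.next().index();
          for (int i = 0; i < numHashFunctions; i++) {
            minHashes[i] = Math.min(minHashes[i], a[i] * index + b[i]);
          }
        }
        System.out.println(Arrays.toString(minHashes));
      }
    }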
Uses of VectorWritable in org.apache.mahout.clustering.spectral.common

Methods in org.apache.mahout.clustering.spectral.common with parameters of type VectorWritable:

  protected void VectorMatrixMultiplicationJob.VectorMatrixMultiplicationMapper.map(
      org.apache.hadoop.io.IntWritable key, VectorWritable row,
      org.apache.hadoop.mapreduce.Mapper.Context ctx)

  protected void UnitVectorizerJob.UnitVectorizerMapper.map(org.apache.hadoop.io.IntWritable row,
      VectorWritable vector, org.apache.hadoop.mapreduce.Mapper.Context context)

  protected void MatrixDiagonalizeJob.MatrixDiagonalizeMapper.map(org.apache.hadoop.io.IntWritable key,
      VectorWritable row, org.apache.hadoop.mapreduce.Mapper.Context context)
Uses of VectorWritable in org.apache.mahout.clustering.spectral.eigencuts

Methods in org.apache.mahout.clustering.spectral.eigencuts with parameters of type VectorWritable:

  protected void EigencutsSensitivityMapper.map(org.apache.hadoop.io.IntWritable row,
      VectorWritable vw, org.apache.hadoop.mapreduce.Mapper.Context context)

  protected void EigencutsAffinityCutsJob.EigencutsAffinityCutsMapper.map(
      org.apache.hadoop.io.IntWritable key, VectorWritable row,
      org.apache.hadoop.mapreduce.Mapper.Context context)
Uses of VectorWritable in org.apache.mahout.clustering.topdown.postprocessor

Method parameters in org.apache.mahout.clustering.topdown.postprocessor with type arguments of type VectorWritable:

  protected void ClusterOutputPostProcessorReducer.reduce(org.apache.hadoop.io.Text key,
      Iterable<VectorWritable> values, org.apache.hadoop.mapreduce.Reducer.Context context)
      The key is the cluster id and the values contain the points in that cluster.
Uses of VectorWritable in org.apache.mahout.common.mapreduce

Methods in org.apache.mahout.common.mapreduce with parameters of type VectorWritable:

  protected void TransposeMapper.map(org.apache.hadoop.io.IntWritable r, VectorWritable v,
      org.apache.hadoop.mapreduce.Mapper.Context ctx)

Method parameters in org.apache.mahout.common.mapreduce with type arguments of type VectorWritable:

  protected void VectorSumReducer.reduce(org.apache.hadoop.io.WritableComparable<?> key,
      Iterable<VectorWritable> values, org.apache.hadoop.mapreduce.Reducer.Context ctx)

  void MergeVectorsReducer.reduce(org.apache.hadoop.io.WritableComparable<?> key,
      Iterable<VectorWritable> vectors, org.apache.hadoop.mapreduce.Reducer.Context ctx)

  void MergeVectorsCombiner.reduce(org.apache.hadoop.io.WritableComparable<?> key,
      Iterable<VectorWritable> vectors, org.apache.hadoop.mapreduce.Reducer.Context ctx)
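The reducers above all consume an Iterable<VectorWritable> for one key. A sketch of the element-wise summing pattern they share; this follows the shape of VectorSumReducer, not its exact source:

    import java.io.IOException;

    import org.apache.hadoop.io.WritableComparable;
    import org.apache.hadoop.mapreduce.Reducer;
    import org.apache.mahout.math.Vector;
    import org.apache.mahout.math.VectorWritable;

    public class SumVectorsReducer
        extends Reducer<WritableComparable<?>, VectorWritable, WritableComparable<?>, VectorWritable> {

      @Override
      protected void reduce(WritableComparable<?> key, Iterable<VectorWritable> values, Context ctx)
          throws IOException, InterruptedException {
        Vector sum = null;
        for (VectorWritable vw : values) {
          // Hadoop reuses the writable between iterations, so clone before keeping it.
          sum = (sum == null) ? vw.get().clone() : sum.plus(vw.get());
        }
        if (sum != null) {
          ctx.write(key, new VectorWritable(sum));
        }
      }
    }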
Uses of VectorWritable in org.apache.mahout.math

Methods in org.apache.mahout.math that return VectorWritable:

  static VectorWritable VectorWritable.merge(Iterator<VectorWritable> vectors)

Method parameters in org.apache.mahout.math with type arguments of type VectorWritable:

  static VectorWritable VectorWritable.merge(Iterator<VectorWritable> vectors)
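A small sketch of calling the static merge method above, assuming (as its use in the merge combiners and reducers elsewhere on this page suggests) that it combines the entries of several partial vectors into one:

    import java.util.Arrays;
    import java.util.Iterator;

    import org.apache.mahout.math.RandomAccessSparseVector;
    import org.apache.mahout.math.Vector;
    import org.apache.mahout.math.VectorWritable;

    public class MergeExample {
      public static void main(String[] args) {
        // Two partial vectors over the same 4-dimensional space.
        Vector a = new RandomAccessSparseVector(4);
        a.set(0, 1.0);
        Vector b = new RandomAccessSparseVector(4);
        b.set(3, 2.0);

        Iterator<VectorWritable> parts =
            Arrays.asList(new VectorWritable(a), new VectorWritable(b)).iterator();
        VectorWritable merged = VectorWritable.merge(parts);
        System.out.println(merged.get());
      }
    }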
Uses of VectorWritable in org.apache.mahout.math.hadoop

Methods in org.apache.mahout.math.hadoop with parameters of type VectorWritable:

  void TransposeJob.TransposeMapper.map(org.apache.hadoop.io.IntWritable r, VectorWritable v,
      org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.IntWritable,VectorWritable> out,
      org.apache.hadoop.mapred.Reporter reporter)

  void TimesSquaredJob.TimesMapper.map(org.apache.hadoop.io.IntWritable rowNum, VectorWritable v,
      org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.NullWritable,VectorWritable> out,
      org.apache.hadoop.mapred.Reporter rep)

  void TimesSquaredJob.TimesSquaredMapper.map(T rowNum, VectorWritable v,
      org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.NullWritable,VectorWritable> out,
      org.apache.hadoop.mapred.Reporter rep)

  protected double TimesSquaredJob.TimesSquaredMapper.scale(VectorWritable v)

Method parameters in org.apache.mahout.math.hadoop with type arguments of type VectorWritable:

  void MatrixMultiplicationJob.MatrixMultiplyMapper.map(org.apache.hadoop.io.IntWritable index,
      org.apache.hadoop.mapred.join.TupleWritable v,
      org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.IntWritable,VectorWritable> out,
      org.apache.hadoop.mapred.Reporter reporter)

  void TransposeJob.TransposeMapper.map(org.apache.hadoop.io.IntWritable r, VectorWritable v,
      org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.IntWritable,VectorWritable> out,
      org.apache.hadoop.mapred.Reporter reporter)

  void TimesSquaredJob.TimesMapper.map(org.apache.hadoop.io.IntWritable rowNum, VectorWritable v,
      org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.NullWritable,VectorWritable> out,
      org.apache.hadoop.mapred.Reporter rep)

  void TimesSquaredJob.TimesSquaredMapper.map(T rowNum, VectorWritable v,
      org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.NullWritable,VectorWritable> out,
      org.apache.hadoop.mapred.Reporter rep)

  void MatrixMultiplicationJob.MatrixMultiplicationReducer.reduce(
      org.apache.hadoop.io.IntWritable rowNum, Iterator<VectorWritable> it,
      org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.IntWritable,VectorWritable> out,
      org.apache.hadoop.mapred.Reporter reporter)

  void TimesSquaredJob.VectorSummingReducer.reduce(org.apache.hadoop.io.NullWritable n,
      Iterator<VectorWritable> vectors,
      org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.NullWritable,VectorWritable> out,
      org.apache.hadoop.mapred.Reporter reporter)

  void TransposeJob.MergeVectorsCombiner.reduce(org.apache.hadoop.io.WritableComparable<?> key,
      Iterator<VectorWritable> vectors,
      org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.WritableComparable<?>,VectorWritable> out,
      org.apache.hadoop.mapred.Reporter reporter)

  void TransposeJob.MergeVectorsReducer.reduce(org.apache.hadoop.io.WritableComparable<?> key,
      Iterator<VectorWritable> vectors,
      org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.WritableComparable<?>,VectorWritable> out,
      org.apache.hadoop.mapred.Reporter reporter)
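Unlike the rest of this page, these classes use the older org.apache.hadoop.mapred API: an Iterator of values plus an OutputCollector and Reporter instead of an Iterable and a Context. A sketch of that reduce shape, following the pattern of TimesSquaredJob.VectorSummingReducer but not its exact source:

    import java.io.IOException;
    import java.util.Iterator;

    import org.apache.hadoop.io.NullWritable;
    import org.apache.hadoop.mapred.MapReduceBase;
    import org.apache.hadoop.mapred.OutputCollector;
    import org.apache.hadoop.mapred.Reducer;
    import org.apache.hadoop.mapred.Reporter;
    import org.apache.mahout.math.Vector;
    import org.apache.mahout.math.VectorWritable;

    // Old-API reducer: sums every partial vector into a single output vector.
    public class OldApiVectorSummingReducer extends MapReduceBase
        implements Reducer<NullWritable, VectorWritable, NullWritable, VectorWritable> {

      @Override
      public void reduce(NullWritable key, Iterator<VectorWritable> vectors,
          OutputCollector<NullWritable, VectorWritable> out, Reporter reporter)
          throws IOException {
        Vector sum = null;
        while (vectors.hasNext()) {
          Vector v = vectors.next().get();
          sum = (sum == null) ? v.clone() : sum.plus(v);
        }
        if (sum != null) {
          out.collect(key, new VectorWritable(sum));
        }
      }
    }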
Uses of VectorWritable in org.apache.mahout.math.hadoop.similarity

Methods in org.apache.mahout.math.hadoop.similarity with parameters of type VectorWritable:

  protected void VectorDistanceMapper.map(org.apache.hadoop.io.WritableComparable<?> key,
      VectorWritable value, org.apache.hadoop.mapreduce.Mapper.Context context)

  protected void VectorDistanceInvertedMapper.map(org.apache.hadoop.io.WritableComparable<?> key,
      VectorWritable value, org.apache.hadoop.mapreduce.Mapper.Context context)
Uses of VectorWritable in org.apache.mahout.math.hadoop.similarity.cooccurrence

Methods in org.apache.mahout.math.hadoop.similarity.cooccurrence with parameters of type VectorWritable:

  protected void RowSimilarityJob.VectorNormMapper.map(org.apache.hadoop.io.IntWritable row,
      VectorWritable vectorWritable, org.apache.hadoop.mapreduce.Mapper.Context ctx)

  protected void RowSimilarityJob.CooccurrencesMapper.map(org.apache.hadoop.io.IntWritable column,
      VectorWritable occurrenceVector, org.apache.hadoop.mapreduce.Mapper.Context ctx)

  protected void RowSimilarityJob.UnsymmetrifyMapper.map(org.apache.hadoop.io.IntWritable row,
      VectorWritable similaritiesWritable, org.apache.hadoop.mapreduce.Mapper.Context ctx)

  static Vector.Element[] Vectors.toArray(VectorWritable vectorWritable)

Method parameters in org.apache.mahout.math.hadoop.similarity.cooccurrence with type arguments of type VectorWritable:

  static Vector Vectors.merge(Iterable<VectorWritable> partialVectors)

  protected void RowSimilarityJob.MergeVectorsCombiner.reduce(org.apache.hadoop.io.IntWritable row,
      Iterable<VectorWritable> partialVectors, org.apache.hadoop.mapreduce.Reducer.Context ctx)

  protected void RowSimilarityJob.MergeVectorsReducer.reduce(org.apache.hadoop.io.IntWritable row,
      Iterable<VectorWritable> partialVectors, org.apache.hadoop.mapreduce.Reducer.Context ctx)

  protected void RowSimilarityJob.SimilarityReducer.reduce(org.apache.hadoop.io.IntWritable row,
      Iterable<VectorWritable> partialDots, org.apache.hadoop.mapreduce.Reducer.Context ctx)

  protected void RowSimilarityJob.MergeToTopKSimilaritiesReducer.reduce(
      org.apache.hadoop.io.IntWritable row, Iterable<VectorWritable> partials,
      org.apache.hadoop.mapreduce.Reducer.Context ctx)
Uses of VectorWritable in org.apache.mahout.math.hadoop.stochasticsvd

Fields in org.apache.mahout.math.hadoop.stochasticsvd with type parameters of type VectorWritable:

  protected org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.Writable,VectorWritable>
      ABtJob.QRReducer.rhatCollector

  protected org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.Writable,VectorWritable>
      ABtDenseOutJob.QRReducer.rhatCollector

Methods in org.apache.mahout.math.hadoop.stochasticsvd with parameters of type VectorWritable:

  protected void VJob.VMapper.map(org.apache.hadoop.io.IntWritable key, VectorWritable value,
      org.apache.hadoop.mapreduce.Mapper.Context context)

  protected void YtYJob.YtYMapper.map(org.apache.hadoop.io.Writable key, VectorWritable value,
      org.apache.hadoop.mapreduce.Mapper.Context context)

  protected void BtJob.BtMapper.map(org.apache.hadoop.io.Writable key, VectorWritable value,
      org.apache.hadoop.mapreduce.Mapper.Context context)
      The A and QtHat inputs are kept partitioned the same way, so this performs what is
      essentially a map-side merge of A and the QtHats, except that QtHat is stored
      block-wise rather than row-wise.

  protected void ABtDenseOutJob.ABtMapper.map(org.apache.hadoop.io.Writable key,
      VectorWritable value, org.apache.hadoop.mapreduce.Mapper.Context context)

  protected void ABtJob.ABtMapper.map(org.apache.hadoop.io.Writable key, VectorWritable value,
      org.apache.hadoop.mapreduce.Mapper.Context context)

  protected void QJob.QMapper.map(org.apache.hadoop.io.Writable key, VectorWritable value,
      org.apache.hadoop.mapreduce.Mapper.Context context)

  protected void UJob.UMapper.map(org.apache.hadoop.io.Writable key, VectorWritable value,
      org.apache.hadoop.mapreduce.Mapper.Context context)

Method parameters in org.apache.mahout.math.hadoop.stochasticsvd with type arguments of type VectorWritable:

  protected void YtYJob.YtYReducer.reduce(org.apache.hadoop.io.IntWritable key,
      Iterable<VectorWritable> values, org.apache.hadoop.mapreduce.Reducer.Context arg2)
Uses of VectorWritable in org.apache.mahout.math.hadoop.stochasticsvd.qr

Constructor parameters in org.apache.mahout.math.hadoop.stochasticsvd.qr with type arguments of type VectorWritable:

  QRFirstStep(org.apache.hadoop.conf.Configuration jobConf,
      org.apache.hadoop.mapred.OutputCollector<? super org.apache.hadoop.io.Writable,? super DenseBlockWritable> qtHatOut,
      org.apache.hadoop.mapred.OutputCollector<? super org.apache.hadoop.io.Writable,? super VectorWritable> rHatOut)

  QRLastStep(Iterator<DenseBlockWritable> qHatInput, Iterator<VectorWritable> rHatInput,
      int blockNum)
Uses of VectorWritable in org.apache.mahout.vectorizer.common

Method parameters in org.apache.mahout.vectorizer.common with type arguments of type VectorWritable:

  protected void PartialVectorMergeReducer.reduce(org.apache.hadoop.io.WritableComparable<?> key,
      Iterable<VectorWritable> values, org.apache.hadoop.mapreduce.Reducer.Context context)
Uses of VectorWritable in org.apache.mahout.vectorizer.pruner

Method parameters in org.apache.mahout.vectorizer.pruner with type arguments of type VectorWritable:

  protected void WordsPrunerReducer.reduce(org.apache.hadoop.io.WritableComparable<?> key,
      Iterable<VectorWritable> values, org.apache.hadoop.mapreduce.Reducer.Context context)

  protected void PrunedPartialVectorMergeReducer.reduce(org.apache.hadoop.io.WritableComparable<?> key,
      Iterable<VectorWritable> values, org.apache.hadoop.mapreduce.Reducer.Context context)
Uses of VectorWritable in org.apache.mahout.vectorizer.term

Methods in org.apache.mahout.vectorizer.term with parameters of type VectorWritable:

  protected void TermDocumentCountMapper.map(org.apache.hadoop.io.WritableComparable<?> key,
      VectorWritable value, org.apache.hadoop.mapreduce.Mapper.Context context)
Uses of VectorWritable in org.apache.mahout.vectorizer.tfidf

Method parameters in org.apache.mahout.vectorizer.tfidf with type arguments of type VectorWritable:

  protected void TFIDFPartialVectorReducer.reduce(org.apache.hadoop.io.WritableComparable<?> key,
      Iterable<VectorWritable> values, org.apache.hadoop.mapreduce.Reducer.Context context)