001    /**
002     * Licensed to the Apache Software Foundation (ASF) under one
003     * or more contributor license agreements.  See the NOTICE file
004     * distributed with this work for additional information
005     * regarding copyright ownership.  The ASF licenses this file
006     * to you under the Apache License, Version 2.0 (the
007     * "License"); you may not use this file except in compliance
008     * with the License.  You may obtain a copy of the License at
009     *
010     *     http://www.apache.org/licenses/LICENSE-2.0
011     *
012     * Unless required by applicable law or agreed to in writing, software
013     * distributed under the License is distributed on an "AS IS" BASIS,
014     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015     * See the License for the specific language governing permissions and
016     * limitations under the License.
017     */
018    
019    package org.apache.hadoop.filecache;
020    
021    import org.apache.hadoop.classification.InterfaceAudience;
022    import org.apache.hadoop.classification.InterfaceStability;
023    import org.apache.hadoop.conf.Configuration;
024    import org.apache.hadoop.fs.FileSystem;
025    import org.apache.hadoop.fs.Path;
026    import org.apache.hadoop.mapreduce.Job;
027    
028    /**
029     * Distribute application-specific large, read-only files efficiently.
030     * 
031     * <p><code>DistributedCache</code> is a facility provided by the Map-Reduce
032     * framework to cache files (text, archives, jars etc.) needed by applications.
033     * </p>
034     * 
035     * <p>Applications specify the files, via urls (hdfs:// or http://) to be cached 
036     * via the {@link org.apache.hadoop.mapred.JobConf}. The
037     * <code>DistributedCache</code> assumes that the files specified via urls are
038     * already present on the {@link FileSystem} at the path specified by the url
039     * and are accessible by every machine in the cluster.</p>
040     * 
041     * <p>The framework will copy the necessary files on to the slave node before 
042     * any tasks for the job are executed on that node. Its efficiency stems from 
043     * the fact that the files are only copied once per job and the ability to 
044     * cache archives which are un-archived on the slaves.</p> 
045     *
046     * <p><code>DistributedCache</code> can be used to distribute simple, read-only
047     * data/text files and/or more complex types such as archives, jars etc. 
048     * Archives (zip, tar and tgz/tar.gz files) are un-archived at the slave nodes. 
049     * Jars may be optionally added to the classpath of the tasks, a rudimentary 
050     * software distribution mechanism.  Files have execution permissions.
051     * Optionally users can also direct it to symlink the distributed cache file(s)
052     * into the working directory of the task.</p>
053     * 
054     * <p><code>DistributedCache</code> tracks modification timestamps of the cache 
055     * files. Clearly the cache files should not be modified by the application 
056     * or externally while the job is executing.</p>
057     * 
058     * <p>Here is an illustrative example on how to use the 
059     * <code>DistributedCache</code>:</p>
 * <blockquote><pre>
061     *     // Setting up the cache for the application
062     *     
063     *     1. Copy the requisite files to the <code>FileSystem</code>:
064     *     
065     *     $ bin/hadoop fs -copyFromLocal lookup.dat /myapp/lookup.dat  
066     *     $ bin/hadoop fs -copyFromLocal map.zip /myapp/map.zip  
067     *     $ bin/hadoop fs -copyFromLocal mylib.jar /myapp/mylib.jar
068     *     $ bin/hadoop fs -copyFromLocal mytar.tar /myapp/mytar.tar
069     *     $ bin/hadoop fs -copyFromLocal mytgz.tgz /myapp/mytgz.tgz
070     *     $ bin/hadoop fs -copyFromLocal mytargz.tar.gz /myapp/mytargz.tar.gz
071     *     
072     *     2. Setup the application's <code>JobConf</code>:
073     *     
074     *     JobConf job = new JobConf();
075     *     DistributedCache.addCacheFile(new URI("/myapp/lookup.dat#lookup.dat"), 
076     *                                   job);
 *     DistributedCache.addCacheArchive(new URI("/myapp/map.zip"), job);
078     *     DistributedCache.addFileToClassPath(new Path("/myapp/mylib.jar"), job);
 *     DistributedCache.addCacheArchive(new URI("/myapp/mytar.tar"), job);
 *     DistributedCache.addCacheArchive(new URI("/myapp/mytgz.tgz"), job);
 *     DistributedCache.addCacheArchive(new URI("/myapp/mytargz.tar.gz"), job);
082     *     
083     *     3. Use the cached files in the {@link org.apache.hadoop.mapred.Mapper}
084     *     or {@link org.apache.hadoop.mapred.Reducer}:
085     *     
086     *     public static class MapClass extends MapReduceBase  
087     *     implements Mapper&lt;K, V, K, V&gt; {
088     *     
089     *       private Path[] localArchives;
090     *       private Path[] localFiles;
091     *       
092     *       public void configure(JobConf job) {
093     *         // Get the cached archives/files
094     *         localArchives = DistributedCache.getLocalCacheArchives(job);
095     *         localFiles = DistributedCache.getLocalCacheFiles(job);
096     *       }
097     *       
098     *       public void map(K key, V value, 
099     *                       OutputCollector&lt;K, V&gt; output, Reporter reporter) 
100     *       throws IOException {
101     *         // Use data from the cached archives/files here
102     *         // ...
103     *         // ...
104     *         output.collect(k, v);
105     *       }
106     *     }
107     *     
 * </pre></blockquote>
109     *
110     * It is also very common to use the DistributedCache by using
111     * {@link org.apache.hadoop.util.GenericOptionsParser}.
112     *
113     * This class includes methods that should be used by users
114     * (specifically those mentioned in the example above, as well
115     * as {@link DistributedCache#addArchiveToClassPath(Path, Configuration)}),
116     * as well as methods intended for use by the MapReduce framework
117     * (e.g., {@link org.apache.hadoop.mapred.JobClient}).
118     *
119     * @see org.apache.hadoop.mapred.JobConf
120     * @see org.apache.hadoop.mapred.JobClient
121     * @see org.apache.hadoop.mapreduce.Job
122     */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class DistributedCache extends
    org.apache.hadoop.mapreduce.filecache.DistributedCache {
  // Deliberately empty: this class exists only to preserve the legacy
  // org.apache.hadoop.filecache package/class name for older client code.
  // All functionality is inherited unchanged from
  // org.apache.hadoop.mapreduce.filecache.DistributedCache.
}