001    /**
002     * Licensed to the Apache Software Foundation (ASF) under one
003     * or more contributor license agreements.  See the NOTICE file
004     * distributed with this work for additional information
005     * regarding copyright ownership.  The ASF licenses this file
006     * to you under the Apache License, Version 2.0 (the
007     * "License"); you may not use this file except in compliance
008     * with the License.  You may obtain a copy of the License at
009     *
010     *     http://www.apache.org/licenses/LICENSE-2.0
011     *
012     * Unless required by applicable law or agreed to in writing, software
013     * distributed under the License is distributed on an "AS IS" BASIS,
014     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015     * See the License for the specific language governing permissions and
016     * limitations under the License.
017     */
018    
019    package org.apache.hadoop.lib.service.hadoop;
020    
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.lib.server.BaseService;
import org.apache.hadoop.lib.server.ServiceException;
import org.apache.hadoop.lib.service.FileSystemAccess;
import org.apache.hadoop.lib.service.FileSystemAccessException;
import org.apache.hadoop.lib.service.Instrumentation;
import org.apache.hadoop.lib.util.Check;
import org.apache.hadoop.lib.util.ConfigurationUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.VersionInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.security.PrivilegedExceptionAction;
import java.util.Collection;
import java.util.HashSet;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
046    
047    public class FileSystemAccessService extends BaseService implements FileSystemAccess {
048      private static final Logger LOG = LoggerFactory.getLogger(FileSystemAccessService.class);
049    
050      public static final String PREFIX = "hadoop";
051    
052      private static final String INSTRUMENTATION_GROUP = "hadoop";
053    
054      public static final String AUTHENTICATION_TYPE = "authentication.type";
055      public static final String KERBEROS_KEYTAB = "authentication.kerberos.keytab";
056      public static final String KERBEROS_PRINCIPAL = "authentication.kerberos.principal";
057    
058      public static final String NAME_NODE_WHITELIST = "name.node.whitelist";
059    
060      public static final String HADOOP_CONF_DIR = "config.dir";
061    
062      private static final String[] HADOOP_CONF_FILES = {"core-site.xml", "hdfs-site.xml"};
063    
064      private static final String FILE_SYSTEM_SERVICE_CREATED = "FileSystemAccessService.created";
065    
066      public FileSystemAccessService() {
067        super(PREFIX);
068      }
069    
070      private Collection<String> nameNodeWhitelist;
071    
072      Configuration serviceHadoopConf;
073    
074      private AtomicInteger unmanagedFileSystems = new AtomicInteger();
075    
076      @Override
077      protected void init() throws ServiceException {
078        LOG.info("Using FileSystemAccess JARs version [{}]", VersionInfo.getVersion());
079        String security = getServiceConfig().get(AUTHENTICATION_TYPE, "simple").trim();
080        if (security.equals("kerberos")) {
081          String defaultName = getServer().getName();
082          String keytab = System.getProperty("user.home") + "/" + defaultName + ".keytab";
083          keytab = getServiceConfig().get(KERBEROS_KEYTAB, keytab).trim();
084          if (keytab.length() == 0) {
085            throw new ServiceException(FileSystemAccessException.ERROR.H01, KERBEROS_KEYTAB);
086          }
087          String principal = defaultName + "/localhost@LOCALHOST";
088          principal = getServiceConfig().get(KERBEROS_PRINCIPAL, principal).trim();
089          if (principal.length() == 0) {
090            throw new ServiceException(FileSystemAccessException.ERROR.H01, KERBEROS_PRINCIPAL);
091          }
092          Configuration conf = new Configuration();
093          conf.set("hadoop.security.authentication", "kerberos");
094          UserGroupInformation.setConfiguration(conf);
095          try {
096            UserGroupInformation.loginUserFromKeytab(principal, keytab);
097          } catch (IOException ex) {
098            throw new ServiceException(FileSystemAccessException.ERROR.H02, ex.getMessage(), ex);
099          }
100          LOG.info("Using FileSystemAccess Kerberos authentication, principal [{}] keytab [{}]", principal, keytab);
101        } else if (security.equals("simple")) {
102          Configuration conf = new Configuration();
103          conf.set("hadoop.security.authentication", "simple");
104          UserGroupInformation.setConfiguration(conf);
105          LOG.info("Using FileSystemAccess simple/pseudo authentication, principal [{}]", System.getProperty("user.name"));
106        } else {
107          throw new ServiceException(FileSystemAccessException.ERROR.H09, security);
108        }
109    
110        String hadoopConfDirProp = getServiceConfig().get(HADOOP_CONF_DIR, getServer().getConfigDir());
111        File hadoopConfDir = new File(hadoopConfDirProp).getAbsoluteFile();
112        if (hadoopConfDir == null) {
113          hadoopConfDir = new File(getServer().getConfigDir()).getAbsoluteFile();
114        }
115        if (!hadoopConfDir.exists()) {
116          throw new ServiceException(FileSystemAccessException.ERROR.H10, hadoopConfDir);
117        }
118        try {
119          serviceHadoopConf = loadHadoopConf(hadoopConfDir);
120        } catch (IOException ex) {
121          throw new ServiceException(FileSystemAccessException.ERROR.H11, ex.toString(), ex);
122        }
123    
124        LOG.debug("FileSystemAccess FileSystem configuration:");
125        for (Map.Entry entry : serviceHadoopConf) {
126          LOG.debug("  {} = {}", entry.getKey(), entry.getValue());
127        }
128        setRequiredServiceHadoopConf(serviceHadoopConf);
129    
130        nameNodeWhitelist = toLowerCase(getServiceConfig().getTrimmedStringCollection(NAME_NODE_WHITELIST));
131      }
132    
133      private Configuration loadHadoopConf(File dir) throws IOException {
134        Configuration hadoopConf = new Configuration(false);
135        for (String file : HADOOP_CONF_FILES) {
136          File f = new File(dir, file);
137          if (f.exists()) {
138            hadoopConf.addResource(new Path(f.getAbsolutePath()));
139          }
140        }
141        return hadoopConf;
142      }
143    
144      @Override
145      public void postInit() throws ServiceException {
146        super.postInit();
147        Instrumentation instrumentation = getServer().get(Instrumentation.class);
148        instrumentation.addVariable(INSTRUMENTATION_GROUP, "unmanaged.fs", new Instrumentation.Variable<Integer>() {
149          @Override
150          public Integer getValue() {
151            return unmanagedFileSystems.get();
152          }
153        });
154        instrumentation.addSampler(INSTRUMENTATION_GROUP, "unmanaged.fs", 60, new Instrumentation.Variable<Long>() {
155          @Override
156          public Long getValue() {
157            return (long) unmanagedFileSystems.get();
158          }
159        });
160      }
161    
162      private Set<String> toLowerCase(Collection<String> collection) {
163        Set<String> set = new HashSet<String>();
164        for (String value : collection) {
165          set.add(value.toLowerCase());
166        }
167        return set;
168      }
169    
170      @Override
171      public Class getInterface() {
172        return FileSystemAccess.class;
173      }
174    
175      @Override
176      public Class[] getServiceDependencies() {
177        return new Class[]{Instrumentation.class};
178      }
179    
180      protected UserGroupInformation getUGI(String user) throws IOException {
181        return UserGroupInformation.createProxyUser(user, UserGroupInformation.getLoginUser());
182      }
183    
184      protected void setRequiredServiceHadoopConf(Configuration conf) {
185        conf.set("fs.hdfs.impl.disable.cache", "true");
186      }
187    
188      protected FileSystem createFileSystem(Configuration namenodeConf) throws IOException {
189        return FileSystem.get(namenodeConf);
190      }
191    
192      protected void closeFileSystem(FileSystem fs) throws IOException {
193        fs.close();
194      }
195    
196      protected void validateNamenode(String namenode) throws FileSystemAccessException {
197        if (nameNodeWhitelist.size() > 0 && !nameNodeWhitelist.contains("*")) {
198          if (!nameNodeWhitelist.contains(namenode.toLowerCase())) {
199            throw new FileSystemAccessException(FileSystemAccessException.ERROR.H05, namenode, "not in whitelist");
200          }
201        }
202      }
203    
204      protected void checkNameNodeHealth(FileSystem fileSystem) throws FileSystemAccessException {
205      }
206    
207      @Override
208      public <T> T execute(String user, final Configuration conf, final FileSystemExecutor<T> executor)
209        throws FileSystemAccessException {
210        Check.notEmpty(user, "user");
211        Check.notNull(conf, "conf");
212        Check.notNull(executor, "executor");
213        if (!conf.getBoolean(FILE_SYSTEM_SERVICE_CREATED, false)) {
214          throw new FileSystemAccessException(FileSystemAccessException.ERROR.H04);
215        }
216        if (conf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY) == null ||
217            conf.getTrimmed(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY).length() == 0) {
218          throw new FileSystemAccessException(FileSystemAccessException.ERROR.H06,
219                                              CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
220        }
221        try {
222          validateNamenode(
223            new URI(conf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY)).
224              getAuthority());
225          UserGroupInformation ugi = getUGI(user);
226          return ugi.doAs(new PrivilegedExceptionAction<T>() {
227            public T run() throws Exception {
228              FileSystem fs = createFileSystem(conf);
229              Instrumentation instrumentation = getServer().get(Instrumentation.class);
230              Instrumentation.Cron cron = instrumentation.createCron();
231              try {
232                checkNameNodeHealth(fs);
233                cron.start();
234                return executor.execute(fs);
235              } finally {
236                cron.stop();
237                instrumentation.addCron(INSTRUMENTATION_GROUP, executor.getClass().getSimpleName(), cron);
238                closeFileSystem(fs);
239              }
240            }
241          });
242        } catch (FileSystemAccessException ex) {
243          throw ex;
244        } catch (Exception ex) {
245          throw new FileSystemAccessException(FileSystemAccessException.ERROR.H03, ex);
246        }
247      }
248    
249      public FileSystem createFileSystemInternal(String user, final Configuration conf)
250        throws IOException, FileSystemAccessException {
251        Check.notEmpty(user, "user");
252        Check.notNull(conf, "conf");
253        if (!conf.getBoolean(FILE_SYSTEM_SERVICE_CREATED, false)) {
254          throw new FileSystemAccessException(FileSystemAccessException.ERROR.H04);
255        }
256        try {
257          validateNamenode(
258            new URI(conf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY)).getAuthority());
259          UserGroupInformation ugi = getUGI(user);
260          return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
261            public FileSystem run() throws Exception {
262              return createFileSystem(conf);
263            }
264          });
265        } catch (IOException ex) {
266          throw ex;
267        } catch (FileSystemAccessException ex) {
268          throw ex;
269        } catch (Exception ex) {
270          throw new FileSystemAccessException(FileSystemAccessException.ERROR.H08, ex.getMessage(), ex);
271        }
272      }
273    
274      @Override
275      public FileSystem createFileSystem(String user, final Configuration conf) throws IOException,
276        FileSystemAccessException {
277        unmanagedFileSystems.incrementAndGet();
278        return createFileSystemInternal(user, conf);
279      }
280    
281      @Override
282      public void releaseFileSystem(FileSystem fs) throws IOException {
283        unmanagedFileSystems.decrementAndGet();
284        closeFileSystem(fs);
285      }
286    
287      @Override
288      public Configuration getFileSystemConfiguration() {
289        Configuration conf = new Configuration(true);
290        ConfigurationUtils.copy(serviceHadoopConf, conf);
291        conf.setBoolean(FILE_SYSTEM_SERVICE_CREATED, true);
292        return conf;
293      }
294    
295    }