/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.lib.service.hadoop;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.lib.server.BaseService;
import org.apache.hadoop.lib.server.ServiceException;
import org.apache.hadoop.lib.service.FileSystemAccess;
import org.apache.hadoop.lib.service.FileSystemAccessException;
import org.apache.hadoop.lib.service.Instrumentation;
import org.apache.hadoop.lib.util.Check;
import org.apache.hadoop.lib.util.ConfigurationUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.VersionInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.net.URI;
import java.security.PrivilegedExceptionAction;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;

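/**
 * Service that gives the server access to Hadoop {@link FileSystem} instances
 * on behalf of proxy users. It configures Hadoop security (simple or Kerberos),
 * builds a service-level Hadoop configuration from <code>conf:</code> prefixed
 * service properties, enforces an optional NameNode whitelist, and reports the
 * number of unmanaged filesystem instances through the {@link Instrumentation}
 * service.
 */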
public class FileSystemAccessService extends BaseService implements FileSystemAccess {
  private static final Logger LOG = LoggerFactory.getLogger(FileSystemAccessService.class);

  public static final String PREFIX = "hadoop";

  private static final String INSTRUMENTATION_GROUP = "hadoop";

  // Service configuration properties, relative to the service prefix.
  public static final String AUTHENTICATION_TYPE = "authentication.type";
  public static final String KERBEROS_KEYTAB = "authentication.kerberos.keytab";
  public static final String KERBEROS_PRINCIPAL = "authentication.kerberos.principal";

  public static final String NAME_NODE_WHITELIST = "name.node.whitelist";

  // Prefix for service properties that are copied into the Hadoop configuration.
  private static final String HADOOP_CONF_PREFIX = "conf:";

  // Legacy Hadoop property naming the target NameNode (predecessor of fs.defaultFS).
  private static final String NAME_NODE_PROPERTY = "fs.default.name";

  public FileSystemAccessService() {
    super(PREFIX);
  }

  private Collection<String> nameNodeWhitelist;

  Configuration serviceHadoopConf;

  // Number of filesystem instances handed out via createFileSystem() and not yet released.
  private AtomicInteger unmanagedFileSystems = new AtomicInteger();

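  /**
   * Initializes the service. Depending on the configured authentication type
   * it sets up simple or Kerberos security for the process (performing the
   * keytab login in the Kerberos case), copies all <code>conf:</code> prefixed
   * service properties into the service Hadoop configuration, applies the
   * required Hadoop settings, and loads the NameNode whitelist.
   *
   * @throws ServiceException if the authentication type is not supported or
   * the Kerberos login fails.
   */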
  @Override
  protected void init() throws ServiceException {
    LOG.info("Using FileSystemAccess JARs version [{}]", VersionInfo.getVersion());
    String security = getServiceConfig().get(AUTHENTICATION_TYPE, "simple").trim();
    if (security.equals("kerberos")) {
      String defaultName = getServer().getName();
      String keytab = System.getProperty("user.home") + "/" + defaultName + ".keytab";
      keytab = getServiceConfig().get(KERBEROS_KEYTAB, keytab).trim();
      if (keytab.length() == 0) {
        throw new ServiceException(FileSystemAccessException.ERROR.H01, KERBEROS_KEYTAB);
      }
      String principal = defaultName + "/localhost@LOCALHOST";
      principal = getServiceConfig().get(KERBEROS_PRINCIPAL, principal).trim();
      if (principal.length() == 0) {
        throw new ServiceException(FileSystemAccessException.ERROR.H01, KERBEROS_PRINCIPAL);
      }
      Configuration conf = new Configuration();
      conf.set("hadoop.security.authentication", "kerberos");
      UserGroupInformation.setConfiguration(conf);
      try {
        UserGroupInformation.loginUserFromKeytab(principal, keytab);
      } catch (IOException ex) {
        throw new ServiceException(FileSystemAccessException.ERROR.H02, ex.getMessage(), ex);
      }
      LOG.info("Using FileSystemAccess Kerberos authentication, principal [{}] keytab [{}]", principal, keytab);
    } else if (security.equals("simple")) {
      Configuration conf = new Configuration();
      conf.set("hadoop.security.authentication", "simple");
      UserGroupInformation.setConfiguration(conf);
      LOG.info("Using FileSystemAccess simple/pseudo authentication, principal [{}]", System.getProperty("user.name"));
    } else {
      throw new ServiceException(FileSystemAccessException.ERROR.H09, security);
    }

    // Copy service properties prefixed with 'conf:' into the service Hadoop configuration.
    serviceHadoopConf = new Configuration(false);
    for (Map.Entry<String, String> entry : getServiceConfig()) {
      String name = entry.getKey();
      if (name.startsWith(HADOOP_CONF_PREFIX)) {
        name = name.substring(HADOOP_CONF_PREFIX.length());
        serviceHadoopConf.set(name, entry.getValue());
      }
    }
    setRequiredServiceHadoopConf(serviceHadoopConf);

    LOG.debug("FileSystemAccess default configuration:");
    for (Map.Entry<String, String> entry : serviceHadoopConf) {
      LOG.debug("  {} = {}", entry.getKey(), entry.getValue());
    }

    nameNodeWhitelist = toLowerCase(getServiceConfig().getTrimmedStringCollection(NAME_NODE_WHITELIST));
  }

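  /**
   * Registers an instrumentation variable and sampler exposing the number of
   * unmanaged filesystem instances currently handed out by this service.
   */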
  @Override
  public void postInit() throws ServiceException {
    super.postInit();
    Instrumentation instrumentation = getServer().get(Instrumentation.class);
    instrumentation.addVariable(INSTRUMENTATION_GROUP, "unmanaged.fs", new Instrumentation.Variable<Integer>() {
      @Override
      public Integer getValue() {
        return unmanagedFileSystems.get();
      }
    });
    instrumentation.addSampler(INSTRUMENTATION_GROUP, "unmanaged.fs", 60, new Instrumentation.Variable<Long>() {
      @Override
      public Long getValue() {
        return (long) unmanagedFileSystems.get();
      }
    });
  }

  private Set<String> toLowerCase(Collection<String> collection) {
    Set<String> set = new HashSet<String>();
    for (String value : collection) {
      set.add(value.toLowerCase());
    }
    return set;
  }

  @Override
  public Class getInterface() {
    return FileSystemAccess.class;
  }

  @Override
  public Class[] getServiceDependencies() {
    return new Class[]{Instrumentation.class};
  }

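  /**
   * Returns a proxy {@link UserGroupInformation} for the given user, backed by
   * the service's login user.
   */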
  protected UserGroupInformation getUGI(String user) throws IOException {
    return UserGroupInformation.createProxyUser(user, UserGroupInformation.getLoginUser());
  }

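  /**
   * Applies Hadoop settings this service always requires. It disables the HDFS
   * {@link FileSystem} cache so every request obtains its own instance, which
   * can then be closed without affecting other in-flight requests.
   */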
  protected void setRequiredServiceHadoopConf(Configuration conf) {
    conf.set("fs.hdfs.impl.disable.cache", "true");
  }

  protected Configuration createHadoopConf(Configuration conf) {
    Configuration hadoopConf = new Configuration();
    ConfigurationUtils.copy(serviceHadoopConf, hadoopConf);
    ConfigurationUtils.copy(conf, hadoopConf);
    return hadoopConf;
  }

  protected Configuration createNameNodeConf(Configuration conf) {
    return createHadoopConf(conf);
  }

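  /**
   * Creates a {@link FileSystem} from the given NameNode configuration.
   * Protected so subclasses can change how filesystem instances are obtained.
   */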
  protected FileSystem createFileSystem(Configuration namenodeConf) throws IOException {
    return FileSystem.get(namenodeConf);
  }

  protected void closeFileSystem(FileSystem fs) throws IOException {
    fs.close();
  }

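  /**
   * Validates that the given NameNode authority is allowed. An empty whitelist,
   * or a whitelist containing <code>*</code>, allows any NameNode.
   *
   * @throws FileSystemAccessException if the NameNode is not in the whitelist.
   */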
  protected void validateNamenode(String namenode) throws FileSystemAccessException {
    if (!nameNodeWhitelist.isEmpty() && !nameNodeWhitelist.contains("*")) {
      if (!nameNodeWhitelist.contains(namenode.toLowerCase())) {
        throw new FileSystemAccessException(FileSystemAccessException.ERROR.H05, namenode, "not in whitelist");
      }
    }
  }

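  /**
   * Hook invoked by {@code execute} before running a filesystem operation. The
   * default implementation is a no-op; subclasses may override it to verify
   * that the NameNode is healthy.
   */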
  protected void checkNameNodeHealth(FileSystem fileSystem) throws FileSystemAccessException {
  }

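  /**
   * Runs the given executor against a {@link FileSystem} obtained for the
   * specified proxy user and NameNode configuration. The filesystem creation
   * and the operation are executed as the proxy user, timed through the
   * {@link Instrumentation} service, and the filesystem is closed when the
   * operation finishes.
   *
   * <p>A minimal usage sketch, illustrative only: {@code fsAccess} stands for
   * this service, the path is hypothetical, and the callback is the
   * {@code FileSystemExecutor} declared in {@link FileSystemAccess}:
   * <pre>
   *   Boolean exists = fsAccess.execute("alice", conf,
   *     new FileSystemAccess.FileSystemExecutor&lt;Boolean&gt;() {
   *       public Boolean execute(FileSystem fs) throws IOException {
   *         return fs.exists(new Path("/user/alice"));
   *       }
   *     });
   * </pre>
   *
   * @throws FileSystemAccessException if {@code fs.default.name} is missing
   * from {@code conf}, the NameNode is not whitelisted, or the operation fails.
   */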
  @Override
  public <T> T execute(String user, final Configuration conf, final FileSystemExecutor<T> executor)
    throws FileSystemAccessException {
    Check.notEmpty(user, "user");
    Check.notNull(conf, "conf");
    Check.notNull(executor, "executor");
    if (conf.get(NAME_NODE_PROPERTY) == null || conf.getTrimmed(NAME_NODE_PROPERTY).length() == 0) {
      throw new FileSystemAccessException(FileSystemAccessException.ERROR.H06, NAME_NODE_PROPERTY);
    }
    try {
      validateNamenode(new URI(conf.get(NAME_NODE_PROPERTY)).getAuthority());
      UserGroupInformation ugi = getUGI(user);
      return ugi.doAs(new PrivilegedExceptionAction<T>() {
        public T run() throws Exception {
          Configuration namenodeConf = createNameNodeConf(conf);
          FileSystem fs = createFileSystem(namenodeConf);
          Instrumentation instrumentation = getServer().get(Instrumentation.class);
          Instrumentation.Cron cron = instrumentation.createCron();
          try {
            checkNameNodeHealth(fs);
            cron.start();
            return executor.execute(fs);
          } finally {
            cron.stop();
            instrumentation.addCron(INSTRUMENTATION_GROUP, executor.getClass().getSimpleName(), cron);
            closeFileSystem(fs);
          }
        }
      });
    } catch (FileSystemAccessException ex) {
      throw ex;
    } catch (Exception ex) {
      throw new FileSystemAccessException(FileSystemAccessException.ERROR.H03, ex);
    }
  }

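  /**
   * Creates a {@link FileSystem} for the given proxy user and NameNode
   * configuration without counting it as an unmanaged instance; the caller is
   * responsible for closing it.
   */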
  public FileSystem createFileSystemInternal(String user, final Configuration conf)
    throws IOException, FileSystemAccessException {
    Check.notEmpty(user, "user");
    Check.notNull(conf, "conf");
    try {
      validateNamenode(new URI(conf.get(NAME_NODE_PROPERTY)).getAuthority());
      UserGroupInformation ugi = getUGI(user);
      return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
        public FileSystem run() throws Exception {
          Configuration namenodeConf = createNameNodeConf(conf);
          return createFileSystem(namenodeConf);
        }
      });
    } catch (IOException ex) {
      throw ex;
    } catch (FileSystemAccessException ex) {
      throw ex;
    } catch (Exception ex) {
      throw new FileSystemAccessException(FileSystemAccessException.ERROR.H08, ex.getMessage(), ex);
    }
  }

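  /**
   * Creates an unmanaged {@link FileSystem} for the given proxy user and
   * NameNode configuration. The instance is counted under the
   * <code>unmanaged.fs</code> instrumentation variable and should be returned
   * via {@link #releaseFileSystem(FileSystem)}.
   */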
  @Override
  public FileSystem createFileSystem(String user, final Configuration conf) throws IOException,
    FileSystemAccessException {
    unmanagedFileSystems.incrementAndGet();
    return createFileSystemInternal(user, conf);
  }

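  /**
   * Releases a filesystem obtained via
   * {@link #createFileSystem(String, Configuration)}, decrementing the
   * unmanaged filesystem count and closing the instance.
   */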
  @Override
  public void releaseFileSystem(FileSystem fs) throws IOException {
    unmanagedFileSystems.decrementAndGet();
    closeFileSystem(fs);
  }

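  /**
   * Returns a copy of the service-level Hadoop configuration (the
   * <code>conf:</code> prefixed properties plus the required settings).
   */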
  @Override
  public Configuration getDefaultConfiguration() {
    Configuration conf = new Configuration(false);
    ConfigurationUtils.copy(serviceHadoopConf, conf);
    return conf;
  }

}