|
||||||||||
PREV CLASS NEXT CLASS | FRAMES NO FRAMES | |||||||||
SUMMARY: NESTED | FIELD | CONSTR | METHOD | DETAIL: FIELD | CONSTR | METHOD |
java.lang.Object
  org.apache.hadoop.hdfs.DFSClient
@InterfaceAudience.Private public class DFSClient
DFSClient can connect to a Hadoop Filesystem and perform basic file tasks. It uses the ClientProtocol to communicate with a NameNode daemon, and connects directly to DataNodes to read/write block data. Hadoop DFS users should obtain an instance of DistributedFileSystem, which uses DFSClient to handle filesystem tasks.
Nested Class Summary | |
---|---|
static class |
DFSClient.DFSDataInputStream
The Hdfs implementation of FSDataInputStream |
Nested classes/interfaces inherited from interface org.apache.hadoop.hdfs.protocol.FSConstants |
---|
FSConstants.DatanodeReportType, FSConstants.SafeModeAction, FSConstants.UpgradeAction |
Field Summary | |
---|---|
static org.apache.commons.logging.Log |
LOG
|
static int |
MAX_BLOCK_ACQUIRE_FAILURES
|
static long |
SERVER_DEFAULTS_VALIDITY_PERIOD
|
Constructor Summary | |
---|---|
DFSClient(org.apache.hadoop.conf.Configuration conf)
Deprecated. Deprecated at 0.21 |
|
DFSClient(InetSocketAddress nameNodeAddr,
org.apache.hadoop.conf.Configuration conf)
Same as this(nameNodeAddr, conf, null); |
|
DFSClient(InetSocketAddress nameNodeAddr,
org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.fs.FileSystem.Statistics stats)
Same as this(nameNodeAddr, null, conf, stats); |
Method Summary | |
---|---|
void |
cancelDelegationToken(org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> token)
|
void |
close()
Close the file system, abandoning all of the leases and files being created, and closing connections to the namenode. |
void |
concat(String trg,
String[] srcs)
Move blocks from src to trg and delete src. See ClientProtocol.concat(String, String[]). |
OutputStream |
create(String src,
boolean overwrite)
Call create(String, boolean, short, long, Progressable) with
default replication and blockSize |
OutputStream |
create(String src,
boolean overwrite,
org.apache.hadoop.util.Progressable progress)
Call create(String, boolean, short, long, Progressable) with
default replication and blockSize |
OutputStream |
create(String src,
boolean overwrite,
short replication,
long blockSize)
Call create(String, boolean, short, long, Progressable) with
null progress . |
OutputStream |
create(String src,
boolean overwrite,
short replication,
long blockSize,
org.apache.hadoop.util.Progressable progress)
Call create(String, boolean, short, long, Progressable, int)
with default bufferSize. |
OutputStream |
create(String src,
boolean overwrite,
short replication,
long blockSize,
org.apache.hadoop.util.Progressable progress,
int buffersize)
Call create(String, FsPermission, EnumSet, short, long,
Progressable, int) with default permission
FsPermission.getDefault() . |
OutputStream |
create(String src,
org.apache.hadoop.fs.permission.FsPermission permission,
EnumSet<org.apache.hadoop.fs.CreateFlag> flag,
boolean createParent,
short replication,
long blockSize,
org.apache.hadoop.util.Progressable progress,
int buffersize)
Create a new dfs file with the specified block replication with write-progress reporting and return an output stream for writing into the file. |
OutputStream |
create(String src,
org.apache.hadoop.fs.permission.FsPermission permission,
EnumSet<org.apache.hadoop.fs.CreateFlag> flag,
short replication,
long blockSize,
org.apache.hadoop.util.Progressable progress,
int buffersize)
Call create(String, FsPermission, EnumSet, boolean, short,
long, Progressable, int) with createParent set to true. |
static ClientProtocol |
createNamenode(org.apache.hadoop.conf.Configuration conf)
The locking hierarchy is to first acquire lock on DFSClient object, followed by lock on leasechecker, followed by lock on an individual DFSOutputStream. |
static ClientProtocol |
createNamenode(InetSocketAddress nameNodeAddr,
org.apache.hadoop.conf.Configuration conf)
|
void |
createSymlink(String target,
String link,
boolean createParent)
Creates a symbolic link. |
DatanodeInfo[] |
datanodeReport(FSConstants.DatanodeReportType type)
|
boolean |
delete(String src)
Deprecated. |
boolean |
delete(String src,
boolean recursive)
delete file or directory. |
UpgradeStatusReport |
distributedUpgradeProgress(FSConstants.UpgradeAction action)
|
boolean |
exists(String src)
Implemented using getFileInfo(src) |
void |
finalizeUpgrade()
|
org.apache.hadoop.fs.BlockLocation[] |
getBlockLocations(String src,
long start,
long length)
Get block location info about a file. getBlockLocations() returns a list of hostnames that store data for a specific file region. |
long |
getBlockSize(String f)
|
long |
getCorruptBlocksCount()
Returns count of blocks with at least one replica marked corrupt. |
long |
getDefaultBlockSize()
Get the default block size for this cluster |
short |
getDefaultReplication()
|
org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> |
getDelegationToken(org.apache.hadoop.io.Text renewer)
|
org.apache.hadoop.fs.FsStatus |
getDiskStatus()
|
org.apache.hadoop.fs.MD5MD5CRC32FileChecksum |
getFileChecksum(String src)
Get the checksum of a file. |
static org.apache.hadoop.fs.MD5MD5CRC32FileChecksum |
getFileChecksum(String src,
ClientProtocol namenode,
SocketFactory socketFactory,
int socketTimeout)
Get the checksum of a file. |
HdfsFileStatus |
getFileInfo(String src)
Get the file info for a specific file or directory. |
HdfsFileStatus |
getFileLinkInfo(String src)
Get the file info for a specific file or directory. |
String |
getLinkTarget(String path)
Resolve the *first* symlink, if any, in the path. |
long |
getMissingBlocksCount()
Returns count of blocks with no good replicas left. |
ClientProtocol |
getNamenode()
Get the namenode associated with this DFSClient object |
org.apache.hadoop.fs.FsServerDefaults |
getServerDefaults()
Get server default values for a number of configuration params. |
long |
getUnderReplicatedBlocksCount()
Returns count of blocks with one or more replicas missing. |
DirectoryListing |
listPaths(String src,
byte[] startAfter)
Get a partial listing of the indicated directory. No block locations need to be fetched. |
DirectoryListing |
listPaths(String src,
byte[] startAfter,
boolean needLocation)
Get a partial listing of the indicated directory. It is recommended to use HdfsFileStatus.EMPTY_NAME as startAfter if the application wants to fetch a listing starting from the first entry in the directory. |
void |
metaSave(String pathname)
Dumps DFS data structures into specified file. |
boolean |
mkdirs(String src)
Deprecated. |
boolean |
mkdirs(String src,
org.apache.hadoop.fs.permission.FsPermission permission,
boolean createParent)
Create a directory (or hierarchy of directories) with the given name and permission. |
DFSInputStream |
open(String src)
|
DFSInputStream |
open(String src,
int buffersize,
boolean verifyChecksum)
Create an input stream that obtains a nodelist from the namenode, and then reads from all the right places. |
DFSInputStream |
open(String src,
int buffersize,
boolean verifyChecksum,
org.apache.hadoop.fs.FileSystem.Statistics stats)
Deprecated. Use open(String, int, boolean) instead. |
OutputStream |
primitiveCreate(String src,
org.apache.hadoop.fs.permission.FsPermission absPermission,
EnumSet<org.apache.hadoop.fs.CreateFlag> flag,
boolean createParent,
short replication,
long blockSize,
org.apache.hadoop.util.Progressable progress,
int buffersize,
int bytesPerChecksum)
Same as create(String, FsPermission, EnumSet, short, long,
Progressable, int) except that the permission
is absolute (i.e. has already been masked with umask). |
boolean |
primitiveMkdir(String src,
org.apache.hadoop.fs.permission.FsPermission absPermission)
Same as mkdirs(String, FsPermission, boolean) except
that the permissions have already been masked against umask. |
void |
refreshNodes()
Refresh the hosts and exclude files. |
boolean |
rename(String src,
String dst)
Deprecated. Use rename(String, String, Options.Rename...) instead. |
void |
rename(String src,
String dst,
org.apache.hadoop.fs.Options.Rename... options)
Rename file or directory. |
long |
renewDelegationToken(org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> token)
|
void |
reportBadBlocks(LocatedBlock[] blocks)
Report corrupt blocks that were discovered by the client. |
void |
setOwner(String src,
String username,
String groupname)
Set file or directory owner. |
void |
setPermission(String src,
org.apache.hadoop.fs.permission.FsPermission permission)
Set permissions to a file or directory. |
boolean |
setReplication(String src,
short replication)
Set replication for an existing file. |
boolean |
setSafeMode(FSConstants.SafeModeAction action)
Enter, leave or get safe mode. |
void |
setTimes(String src,
long mtime,
long atime)
Set the modification and access time of a file. |
static String |
stringifyToken(org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> token)
A test method for printing out tokens |
String |
toString()
|
Methods inherited from class java.lang.Object |
---|
clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait |
Field Detail |
---|
public static final org.apache.commons.logging.Log LOG
public static final long SERVER_DEFAULTS_VALIDITY_PERIOD
public static final int MAX_BLOCK_ACQUIRE_FAILURES
Constructor Detail |
---|
@Deprecated public DFSClient(org.apache.hadoop.conf.Configuration conf) throws IOException
IOException
DFSClient(InetSocketAddress, Configuration)
public DFSClient(InetSocketAddress nameNodeAddr, org.apache.hadoop.conf.Configuration conf) throws IOException
IOException
DFSClient(InetSocketAddress, Configuration, org.apache.hadoop.fs.FileSystem.Statistics)
public DFSClient(InetSocketAddress nameNodeAddr, org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.fs.FileSystem.Statistics stats) throws IOException
IOException
DFSClient(InetSocketAddress, ClientProtocol, Configuration, org.apache.hadoop.fs.FileSystem.Statistics)
Method Detail |
---|
public static ClientProtocol createNamenode(org.apache.hadoop.conf.Configuration conf) throws IOException
IOException
public static ClientProtocol createNamenode(InetSocketAddress nameNodeAddr, org.apache.hadoop.conf.Configuration conf) throws IOException
IOException
public void close() throws IOException
close
in interface Closeable
IOException
public long getDefaultBlockSize()
public long getBlockSize(String f) throws IOException
IOException
ClientProtocol.getPreferredBlockSize(String)
public org.apache.hadoop.fs.FsServerDefaults getServerDefaults() throws IOException
IOException
ClientProtocol.getServerDefaults()
public static String stringifyToken(org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> token) throws IOException
token
-
IOException
public org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> getDelegationToken(org.apache.hadoop.io.Text renewer) throws IOException
IOException
ClientProtocol.getDelegationToken(Text)
public long renewDelegationToken(org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> token) throws org.apache.hadoop.security.token.SecretManager.InvalidToken, IOException
org.apache.hadoop.security.token.SecretManager.InvalidToken
IOException
ClientProtocol.renewDelegationToken(Token)
public void cancelDelegationToken(org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> token) throws org.apache.hadoop.security.token.SecretManager.InvalidToken, IOException
org.apache.hadoop.security.token.SecretManager.InvalidToken
IOException
ClientProtocol.cancelDelegationToken(Token)
public void reportBadBlocks(LocatedBlock[] blocks) throws IOException
IOException
ClientProtocol.reportBadBlocks(LocatedBlock[])
public short getDefaultReplication()
public org.apache.hadoop.fs.BlockLocation[] getBlockLocations(String src, long start, long length) throws IOException, org.apache.hadoop.fs.UnresolvedLinkException
IOException
org.apache.hadoop.fs.UnresolvedLinkException
public DFSInputStream open(String src) throws IOException, org.apache.hadoop.fs.UnresolvedLinkException
IOException
org.apache.hadoop.fs.UnresolvedLinkException
@Deprecated public DFSInputStream open(String src, int buffersize, boolean verifyChecksum, org.apache.hadoop.fs.FileSystem.Statistics stats) throws IOException, org.apache.hadoop.fs.UnresolvedLinkException
open(String, int, boolean)
instead.
IOException
org.apache.hadoop.fs.UnresolvedLinkException
public DFSInputStream open(String src, int buffersize, boolean verifyChecksum) throws IOException, org.apache.hadoop.fs.UnresolvedLinkException
IOException
org.apache.hadoop.fs.UnresolvedLinkException
public ClientProtocol getNamenode()
public OutputStream create(String src, boolean overwrite) throws IOException
create(String, boolean, short, long, Progressable)
with
default replication
and blockSize and null
progress
.
- Throws:
IOException
public OutputStream create(String src, boolean overwrite, org.apache.hadoop.util.Progressable progress) throws IOException
create(String, boolean, short, long, Progressable)
with
default replication
and blockSize.
- Throws:
IOException
public OutputStream create(String src, boolean overwrite, short replication, long blockSize) throws IOException
create(String, boolean, short, long, Progressable)
with
null progress
.
IOException
public OutputStream create(String src, boolean overwrite, short replication, long blockSize, org.apache.hadoop.util.Progressable progress) throws IOException
create(String, boolean, short, long, Progressable, int)
with default bufferSize.
IOException
public OutputStream create(String src, boolean overwrite, short replication, long blockSize, org.apache.hadoop.util.Progressable progress, int buffersize) throws IOException
create(String, FsPermission, EnumSet, short, long,
Progressable, int)
with default permission
FsPermission.getDefault()
.
src
- File name
overwrite
- overwrite an existing file if true
replication
- replication factor for the file
blockSize
- maximum block size
progress
- interface for reporting client progress
buffersize
- underlying buffersize
IOException
public OutputStream create(String src, org.apache.hadoop.fs.permission.FsPermission permission, EnumSet<org.apache.hadoop.fs.CreateFlag> flag, short replication, long blockSize, org.apache.hadoop.util.Progressable progress, int buffersize) throws IOException
create(String, FsPermission, EnumSet, boolean, short,
long, Progressable, int)
with createParent
set to true.
IOException
public OutputStream create(String src, org.apache.hadoop.fs.permission.FsPermission permission, EnumSet<org.apache.hadoop.fs.CreateFlag> flag, boolean createParent, short replication, long blockSize, org.apache.hadoop.util.Progressable progress, int buffersize) throws IOException
src
- File name
permission
- The permission of the directory being created.
If null, use default permission FsPermission.getDefault()
flag
- indicates create a new file or create/overwrite an
existing file or append to an existing file
createParent
- create missing parent directory if true
replication
- block replication
blockSize
- maximum block size
progress
- interface for reporting client progress
buffersize
- underlying buffer size
IOException
for detailed description of exceptions thrown
public OutputStream primitiveCreate(String src, org.apache.hadoop.fs.permission.FsPermission absPermission, EnumSet<org.apache.hadoop.fs.CreateFlag> flag, boolean createParent, short replication, long blockSize, org.apache.hadoop.util.Progressable progress, int buffersize, int bytesPerChecksum) throws IOException, org.apache.hadoop.fs.UnresolvedLinkException
create(String, FsPermission, EnumSet, short, long,
Progressable, int)
except that the permission
is absolute (i.e. has already been masked with umask).
IOException
org.apache.hadoop.fs.UnresolvedLinkException
public void createSymlink(String target, String link, boolean createParent) throws IOException
IOException
ClientProtocol.createSymlink(String, String,FsPermission, boolean)
public String getLinkTarget(String path) throws IOException
IOException
ClientProtocol.getLinkTarget(String)
public boolean setReplication(String src, short replication) throws IOException
src
- file name
replication
-
IOException
ClientProtocol.setReplication(String, short)
@Deprecated public boolean rename(String src, String dst) throws IOException
rename(String, String, Options.Rename...)
instead.
IOException
ClientProtocol.rename(String, String)
public void concat(String trg, String[] srcs) throws IOException
ClientProtocol.concat(String, String [])
.
IOException
public void rename(String src, String dst, org.apache.hadoop.fs.Options.Rename... options) throws IOException
IOException
ClientProtocol.rename(String, String, Options.Rename...)
@Deprecated public boolean delete(String src) throws IOException
ClientProtocol.delete(String)
.
IOException
public boolean delete(String src, boolean recursive) throws IOException
IOException
ClientProtocol.delete(String, boolean)
public boolean exists(String src) throws IOException
IOException
public DirectoryListing listPaths(String src, byte[] startAfter) throws IOException
IOException
public DirectoryListing listPaths(String src, byte[] startAfter, boolean needLocation) throws IOException
IOException
ClientProtocol.getListing(String, byte[], boolean)
public HdfsFileStatus getFileInfo(String src) throws IOException
src
- The string representation of the path to the file
IOException
for description of exceptions
public HdfsFileStatus getFileLinkInfo(String src) throws IOException
src
- path to a file or directory.
For description of exceptions thrown
IOException
ClientProtocol.getFileLinkInfo(String)
public org.apache.hadoop.fs.MD5MD5CRC32FileChecksum getFileChecksum(String src) throws IOException
src
- The file path
IOException
DistributedFileSystem.getFileChecksum(Path)
public static org.apache.hadoop.fs.MD5MD5CRC32FileChecksum getFileChecksum(String src, ClientProtocol namenode, SocketFactory socketFactory, int socketTimeout) throws IOException
src
- The file path
IOException
public void setPermission(String src, org.apache.hadoop.fs.permission.FsPermission permission) throws IOException
src
- path name.
permission
-
IOException
ClientProtocol.setPermission(String, FsPermission)
public void setOwner(String src, String username, String groupname) throws IOException
src
- path name.
username
- user id.
groupname
- user group.
IOException
ClientProtocol.setOwner(String, String, String)
public org.apache.hadoop.fs.FsStatus getDiskStatus() throws IOException
IOException
ClientProtocol.getStats()
public long getMissingBlocksCount() throws IOException
IOException
public long getUnderReplicatedBlocksCount() throws IOException
IOException
public long getCorruptBlocksCount() throws IOException
IOException
public DatanodeInfo[] datanodeReport(FSConstants.DatanodeReportType type) throws IOException
IOException
public boolean setSafeMode(FSConstants.SafeModeAction action) throws IOException
IOException
ClientProtocol.setSafeMode(FSConstants.SafeModeAction)
public void refreshNodes() throws IOException
ClientProtocol.refreshNodes()
for more details.
IOException
ClientProtocol.refreshNodes()
public void metaSave(String pathname) throws IOException
IOException
ClientProtocol.metaSave(String)
public void finalizeUpgrade() throws IOException
IOException
ClientProtocol.finalizeUpgrade()
public UpgradeStatusReport distributedUpgradeProgress(FSConstants.UpgradeAction action) throws IOException
IOException
ClientProtocol.distributedUpgradeProgress(FSConstants.UpgradeAction)
@Deprecated public boolean mkdirs(String src) throws IOException
IOException
public boolean mkdirs(String src, org.apache.hadoop.fs.permission.FsPermission permission, boolean createParent) throws IOException
src
- The path of the directory being created
permission
- The permission of the directory being created.
If permission == null, use FsPermission.getDefault().
createParent
- create missing parent directory if true
IOException
ClientProtocol.mkdirs(String, FsPermission, boolean)
public boolean primitiveMkdir(String src, org.apache.hadoop.fs.permission.FsPermission absPermission) throws IOException
mkdirs(String, FsPermission, boolean)
except
that the permissions have already been masked against umask.
IOException
public void setTimes(String src, long mtime, long atime) throws IOException
IOException
ClientProtocol.setTimes(String, long, long)
public String toString()
toString
in class Object
|
||||||||||
PREV CLASS NEXT CLASS | FRAMES NO FRAMES | |||||||||
SUMMARY: NESTED | FIELD | CONSTR | METHOD | DETAIL: FIELD | CONSTR | METHOD |