org.apache.hadoop.conf.Configuration.addDeprecation(String, String[])
|
org.apache.hadoop.conf.Configuration.addDeprecation(String, String[], String)
|
org.apache.hadoop.ipc.Client.call(RPC.RpcKind, Writable, InetSocketAddress)
|
org.apache.hadoop.ipc.Client.call(RPC.RpcKind, Writable, InetSocketAddress, Class<?>, UserGroupInformation, int)
|
org.apache.hadoop.ipc.Client.call(RPC.RpcKind, Writable, InetSocketAddress, UserGroupInformation)
|
org.apache.hadoop.ipc.Server.call(Writable, long)
Use #call(RpcPayloadHeader.RpcKind, String,
Writable, long) instead
|
org.apache.hadoop.io.WritableUtils.cloneInto(Writable, Writable)
use ReflectionUtils.cloneInto instead.
|
org.apache.hadoop.util.ReflectionUtils.cloneWritableInto(Writable, Writable) |
org.apache.hadoop.fs.FileSystem.createNonRecursive(Path, boolean, int, short, long, Progressable)
API only for 0.20-append
|
org.apache.hadoop.fs.FileSystem.createNonRecursive(Path, FsPermission, boolean, int, short, long, Progressable)
API only for 0.20-append
|
org.apache.hadoop.fs.RawLocalFileSystem.createNonRecursive(Path, FsPermission, EnumSet<CreateFlag>, int, short, long, Progressable) |
org.apache.hadoop.fs.FilterFileSystem.createNonRecursive(Path, FsPermission, EnumSet<CreateFlag>, int, short, long, Progressable) |
org.apache.hadoop.fs.FileSystem.createNonRecursive(Path, FsPermission, EnumSet<CreateFlag>, int, short, long, Progressable)
API only for 0.20-append
|
org.apache.hadoop.io.file.tfile.TFile.Reader.createScanner(byte[], byte[])
|
org.apache.hadoop.io.file.tfile.TFile.Reader.createScanner(RawComparable, RawComparable)
|
org.apache.hadoop.io.SequenceFile.createWriter(Configuration, FSDataOutputStream, Class, Class, SequenceFile.CompressionType, CompressionCodec)
|
org.apache.hadoop.io.SequenceFile.createWriter(Configuration, FSDataOutputStream, Class, Class, SequenceFile.CompressionType, CompressionCodec, SequenceFile.Metadata)
|
org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class)
|
org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class, int, short, long, boolean, SequenceFile.CompressionType, CompressionCodec, SequenceFile.Metadata) |
org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class, int, short, long, SequenceFile.CompressionType, CompressionCodec, Progressable, SequenceFile.Metadata)
|
org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class, SequenceFile.CompressionType)
|
org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class, SequenceFile.CompressionType, CompressionCodec)
|
org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class, SequenceFile.CompressionType, CompressionCodec, Progressable)
|
org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class, SequenceFile.CompressionType, CompressionCodec, Progressable, SequenceFile.Metadata)
|
org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class, SequenceFile.CompressionType, Progressable)
|
org.apache.hadoop.fs.FileSystem.delete(Path)
|
org.apache.hadoop.fs.FileUtil.fullyDelete(FileSystem, Path)
|
org.apache.hadoop.io.BytesWritable.get()
|
org.apache.hadoop.fs.FileSystem.getBlockSize(Path)
Use getFileStatus() instead
|
org.apache.hadoop.fs.FileSystem.getDefaultBlockSize()
|
org.apache.hadoop.fs.FileSystem.getDefaultReplication()
|
org.apache.hadoop.fs.FileSystem.getLength(Path)
Use getFileStatus() instead
|
org.apache.hadoop.fs.FileSystem.getName()
call #getUri() instead.
|
org.apache.hadoop.fs.FileSystem.getNamed(String, Configuration)
call #get(URI,Configuration) instead.
|
org.apache.hadoop.fs.FileSystem.getReplication(Path)
Use getFileStatus() instead
|
org.apache.hadoop.ipc.RPC.getServer(Class<?>, Object, String, int, Configuration)
|
org.apache.hadoop.ipc.RPC.getServer(Class<?>, Object, String, int, int, boolean, Configuration)
|
org.apache.hadoop.ipc.RPC.getServer(Class<?>, Object, String, int, int, boolean, Configuration, SecretManager<? extends TokenIdentifier>)
|
org.apache.hadoop.ipc.RPC.getServer(Class<?>, Object, String, int, int, boolean, Configuration, SecretManager<? extends TokenIdentifier>, String)
|
org.apache.hadoop.ipc.RPC.getServer(Class<PROTO>, IMPL, String, int, int, int, int, boolean, Configuration, SecretManager<? extends TokenIdentifier>)
|
org.apache.hadoop.ipc.RPC.getServer(Object, String, int, Configuration)
|
org.apache.hadoop.ipc.RPC.getServer(Object, String, int, int, boolean, Configuration)
|
org.apache.hadoop.fs.FileSystem.getServerDefaults()
|
org.apache.hadoop.io.BytesWritable.getSize()
|
org.apache.hadoop.fs.FileSystem.getStatistics()
|
org.apache.hadoop.fs.FileStatus.isDir()
|
org.apache.hadoop.fs.kfs.KosmosFileSystem.lock(Path, boolean) |
org.apache.hadoop.fs.Path.makeQualified(FileSystem) |
org.apache.hadoop.metrics2.util.MetricsCache.Record.metrics()
use metricsEntrySet() instead
|
org.apache.hadoop.fs.FileSystem.primitiveCreate(Path, FsPermission, EnumSet<CreateFlag>, int, short, long, Progressable, Options.ChecksumOpt) |
org.apache.hadoop.fs.FileSystem.primitiveMkdir(Path, FsPermission) |
org.apache.hadoop.fs.FileSystem.primitiveMkdir(Path, FsPermission, boolean) |
org.apache.hadoop.fs.kfs.KosmosFileSystem.release(Path) |
org.apache.hadoop.fs.FileSystem.rename(Path, Path, Options.Rename...) |
org.apache.hadoop.fs.Syncable.sync()
As of HADOOP 0.21.0, replaced by hflush
|
org.apache.hadoop.fs.FSDataOutputStream.sync() |
org.apache.hadoop.io.SequenceFile.Writer.syncFs()
|