org.apache.hadoop.mapreduce.filecache.DistributedCache.addArchiveToClassPath(Path, Configuration)
|
org.apache.hadoop.mapreduce.filecache.DistributedCache.addCacheArchive(URI, Configuration)
|
org.apache.hadoop.mapreduce.filecache.DistributedCache.addCacheFile(URI, Configuration)
|
org.apache.hadoop.mapreduce.filecache.DistributedCache.addFileToClassPath(Path, Configuration)
|
org.apache.hadoop.mapred.JobClient.cancelDelegationToken(Token)
Use Token.cancel(org.apache.hadoop.conf.Configuration) instead
|
org.apache.hadoop.mapreduce.Cluster.cancelDelegationToken(Token)
Use Token.cancel(org.apache.hadoop.conf.Configuration) instead
|
org.apache.hadoop.mapred.TaskLog.captureOutAndError(List, List, File, File, long, String)
pid files are no longer used. Instead, the pid is exported to the
environment variable JVM_PID.
|
org.apache.hadoop.mapred.FileOutputCommitter.cleanupJob(JobContext) |
org.apache.hadoop.mapred.OutputCommitter.cleanupJob(JobContext)
|
org.apache.hadoop.mapred.OutputCommitter.cleanupJob(JobContext)
|
org.apache.hadoop.mapreduce.OutputCommitter.cleanupJob(JobContext)
|
org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter.cleanupJob(JobContext) |
org.apache.hadoop.mapred.Counters.Counter.contentEquals(Counters.Counter) |
org.apache.hadoop.mapred.lib.CombineFileInputFormat.createPool(JobConf, List)
|
org.apache.hadoop.mapred.lib.CombineFileInputFormat.createPool(JobConf, PathFilter...)
|
org.apache.hadoop.mapreduce.Job.createSymlink() |
org.apache.hadoop.mapreduce.filecache.DistributedCache.createSymlink(Configuration)
This is a NO-OP.
|
org.apache.hadoop.mapreduce.lib.db.DBRecordReader.createValue() |
org.apache.hadoop.mapred.JobConf.deleteLocalFiles() |
org.apache.hadoop.mapred.Counters.findCounter(String, int, String)
|
org.apache.hadoop.mapreduce.Cluster.getAllJobs()
|
org.apache.hadoop.mapreduce.filecache.DistributedCache.getArchiveClassPaths(Configuration)
|
org.apache.hadoop.mapreduce.filecache.DistributedCache.getArchiveTimestamps(Configuration)
|
org.apache.hadoop.mapreduce.filecache.DistributedCache.getCacheArchives(Configuration)
|
org.apache.hadoop.mapreduce.filecache.DistributedCache.getCacheFiles(Configuration)
|
org.apache.hadoop.mapred.Counters.Group.getCounter(int, String)
|
org.apache.hadoop.mapreduce.filecache.DistributedCache.getFileClassPaths(Configuration)
|
org.apache.hadoop.mapreduce.filecache.DistributedCache.getFileTimestamps(Configuration)
|
org.apache.hadoop.mapreduce.Job.getInstance(Cluster)
|
org.apache.hadoop.mapreduce.Job.getInstance(Cluster, Configuration)
|
org.apache.hadoop.mapred.JobClient.getJob(String)
|
org.apache.hadoop.mapred.JobProfile.getJobId()
use getJobID() instead
|
org.apache.hadoop.mapred.JobStatus.getJobId()
use getJobID() instead
|
org.apache.hadoop.mapred.RunningJob.getJobID()
This method is deprecated and will be removed. Applications should
rather use RunningJob.getID() .
|
org.apache.hadoop.mapred.JobID.getJobIDsPattern(String, Integer) |
org.apache.hadoop.mapreduce.JobContext.getLocalCacheArchives()
the array returned only includes the items that were
downloaded. There is no way to map this to what is returned by
JobContext.getCacheArchives() .
|
org.apache.hadoop.mapreduce.filecache.DistributedCache.getLocalCacheArchives(Configuration)
|
org.apache.hadoop.mapreduce.JobContext.getLocalCacheFiles()
the array returned only includes the items that were
downloaded. There is no way to map this to what is returned by
JobContext.getCacheFiles() .
|
org.apache.hadoop.mapreduce.filecache.DistributedCache.getLocalCacheFiles(Configuration)
|
org.apache.hadoop.mapred.JobClient.getMapTaskReports(String)
|
org.apache.hadoop.mapred.ClusterStatus.getMaxMemory() |
org.apache.hadoop.mapred.JobConf.getMaxPhysicalMemoryForTask()
this variable is deprecated and no longer in use.
|
org.apache.hadoop.mapred.JobConf.getMaxVirtualMemoryForTask()
|
org.apache.hadoop.mapreduce.lib.db.DBRecordReader.getPos() |
org.apache.hadoop.mapred.JobQueueInfo.getQueueState() |
org.apache.hadoop.mapred.JobClient.getReduceTaskReports(String)
|
org.apache.hadoop.mapred.JobConf.getSessionId() |
org.apache.hadoop.mapreduce.JobContext.getSymlink() |
org.apache.hadoop.mapreduce.filecache.DistributedCache.getSymlink(Configuration)
symlinks are always created.
|
org.apache.hadoop.mapred.TaskAttemptID.getTaskAttemptIDsPattern(String, Integer, Boolean, Integer, Integer) |
org.apache.hadoop.mapred.TaskAttemptID.getTaskAttemptIDsPattern(String, Integer, TaskType, Integer, Integer) |
org.apache.hadoop.mapred.TaskCompletionEvent.getTaskId()
|
org.apache.hadoop.mapred.TaskID.getTaskIDsPattern(String, Integer, Boolean, Integer)
|
org.apache.hadoop.mapred.TaskID.getTaskIDsPattern(String, Integer, TaskType, Integer) |
org.apache.hadoop.mapred.JobClient.getTaskOutputFilter() |
org.apache.hadoop.mapred.ClusterStatus.getUsedMemory() |
org.apache.hadoop.mapreduce.TaskID.isMap() |
org.apache.hadoop.mapreduce.TaskAttemptID.isMap() |
org.apache.hadoop.mapred.RunningJob.killTask(String, boolean)
|
org.apache.hadoop.mapreduce.lib.db.DBRecordReader.next(LongWritable, T)
|
org.apache.hadoop.mapred.JobID.read(DataInput) |
org.apache.hadoop.mapred.TaskID.read(DataInput) |
org.apache.hadoop.mapred.TaskAttemptID.read(DataInput) |
org.apache.hadoop.mapred.JobClient.renewDelegationToken(Token)
Use Token.renew(org.apache.hadoop.conf.Configuration) instead
|
org.apache.hadoop.mapreduce.Cluster.renewDelegationToken(Token)
Use Token.renew(org.apache.hadoop.conf.Configuration) instead
|
org.apache.hadoop.mapred.jobcontrol.Job.setAssignedJobID(JobID)
setAssignedJobID should not be called.
JOBID is set by the framework.
|
org.apache.hadoop.mapreduce.filecache.DistributedCache.setCacheArchives(URI[], Configuration)
|
org.apache.hadoop.mapreduce.filecache.DistributedCache.setCacheFiles(URI[], Configuration)
|
org.apache.hadoop.mapreduce.Counter.setDisplayName(String)
(and no-op by default)
|
org.apache.hadoop.mapreduce.counters.GenericCounter.setDisplayName(String) |
org.apache.hadoop.mapreduce.counters.AbstractCounter.setDisplayName(String) |
org.apache.hadoop.mapred.JobConf.setMaxPhysicalMemoryForTask(long) |
org.apache.hadoop.mapred.JobConf.setMaxVirtualMemoryForTask(long)
|
org.apache.hadoop.mapred.JobConf.setSessionId(String) |
org.apache.hadoop.mapreduce.util.ProcfsBasedProcessTree.setSigKillInterval(long)
|
org.apache.hadoop.mapred.TaskCompletionEvent.setTaskId(String)
|
org.apache.hadoop.mapred.JobClient.setTaskOutputFilter(JobClient.TaskStatusFilter) |
org.apache.hadoop.mapred.Counters.size()
|
org.apache.hadoop.mapred.pipes.Submitter.submitJob(JobConf)
|