Package | Description |
---|---|
org.apache.hadoop.mapred |
Modifier and Type | Method and Description |
---|---|
static TaskAttemptID | TaskAttemptID.downgrade(TaskAttemptID old) - Downgrade a new TaskAttemptID to an old one. |
static TaskAttemptID | TaskAttemptID.forName(String str) - Construct a TaskAttemptID object from a given string. |
TaskAttemptID | TaskReport.getSuccessfulTaskAttempt() - Get the attempt ID that took this task to completion. |
TaskAttemptID | TaskCompletionEvent.getTaskAttemptId() - Returns the task attempt id. |
TaskAttemptID | TaskAttemptContextImpl.getTaskAttemptID() - Get the taskAttemptID. |
TaskAttemptID | TaskAttemptContext.getTaskAttemptID() |
TaskAttemptID | Task.getTaskID() |
TaskAttemptID | TaskStatus.getTaskID() |
static TaskAttemptID | TaskAttemptID.read(DataInput in) - Deprecated. |
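The two static factory methods above are the usual way to round-trip an attempt id through its string form. A minimal sketch, assuming a made-up but well-formed attempt string (forName throws IllegalArgumentException if the string does not parse):

```java
import org.apache.hadoop.mapred.TaskAttemptID;

public class TaskAttemptIdParseExample {
    public static void main(String[] args) {
        // Hypothetical attempt string in the standard
        // attempt_<jtIdentifier>_<jobId>_<m|r>_<taskId>_<attempt> layout.
        String attemptStr = "attempt_200707121733_0003_m_000005_0";

        // forName(String) reconstructs a typed TaskAttemptID from the string form.
        TaskAttemptID attempt = TaskAttemptID.forName(attemptStr);

        // The parsed id exposes its components through the usual accessors.
        System.out.println("job:      " + attempt.getJobID());
        System.out.println("task:     " + attempt.getTaskID());
        System.out.println("attempt#: " + attempt.getId());

        // downgrade(...) converts a new-API (org.apache.hadoop.mapreduce)
        // TaskAttemptID into the old mapred one listed in this table.
        org.apache.hadoop.mapreduce.TaskAttemptID newApiId = attempt;
        TaskAttemptID oldApiId = TaskAttemptID.downgrade(newApiId);
        System.out.println("downgraded: " + oldApiId);
    }
}
```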
Modifier and Type | Method and Description |
---|---|
List<TaskAttemptID> | TaskStatus.getFetchFailedMaps() - Get the list of maps from which output-fetches failed. |
Map<TaskAttemptID,MapOutputFile> | ShuffleConsumerPlugin.Context.getLocalMapFiles() |
Collection<TaskAttemptID> | TaskReport.getRunningTaskAttempts() - Get the running task attempt IDs for this task. |
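The collection-valued accessors are typically read from task reports fetched through the old-API JobClient. A hedged sketch; the reachable cluster and the job id passed as args[0] are assumptions, not part of the table above:

```java
import java.io.IOException;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.TaskAttemptID;
import org.apache.hadoop.mapred.TaskReport;

public class RunningAttemptsExample {
    public static void main(String[] args) throws IOException {
        // Assumes a reachable cluster and an existing job id, e.g. job_200707121733_0003.
        JobClient client = new JobClient(new JobConf());
        JobID jobId = JobID.forName(args[0]);

        // Walk the map-task reports and print the attempts currently running
        // for each task, using the Collection<TaskAttemptID> accessor above.
        for (TaskReport report : client.getMapTaskReports(jobId)) {
            for (TaskAttemptID attempt : report.getRunningTaskAttempts()) {
                System.out.println(report.getTaskID() + " running attempt " + attempt);
            }
        }
        client.close();
    }
}
```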
Modifier and Type | Method and Description |
---|---|
abstract void | TaskStatus.addFetchFailedMap(TaskAttemptID mapTaskId) - Add to the list of maps from which output-fetches failed. |
boolean | TaskUmbilicalProtocol.canCommit(TaskAttemptID taskid) - Polling to know whether the task can go ahead with its commit. |
void | TaskUmbilicalProtocol.commitPending(TaskAttemptID taskId, TaskStatus taskStatus) - Report that the task is complete, but its commit is pending. |
static <K,V> Task.CombinerRunner<K,V> | Task.CombinerRunner.create(JobConf job, TaskAttemptID taskId, Counters.Counter inputCounter, Task.TaskReporter reporter, OutputCommitter committer) |
void | TaskUmbilicalProtocol.done(TaskAttemptID taskid) - Report that the task is successfully completed. |
void | TaskUmbilicalProtocol.fatalError(TaskAttemptID taskId, String message) - Report that the task encountered a fatal error. |
void | TaskUmbilicalProtocol.fsError(TaskAttemptID taskId, String message) - Report that the task encountered a local filesystem error. |
MapTaskCompletionEventsUpdate | TaskUmbilicalProtocol.getMapCompletionEvents(JobID jobId, int fromIndex, int maxLocs, TaskAttemptID id) - Called by a reduce task to get the map output locations for finished maps. |
String[] | RunningJob.getTaskDiagnostics(TaskAttemptID taskid) - Gets the diagnostic messages for a given task attempt. |
static File | TaskLog.getTaskLogFile(TaskAttemptID taskid, boolean isCleanup, TaskLog.LogName filter) |
void | RunningJob.killTask(TaskAttemptID taskId, boolean shouldFail) - Kill the indicated task attempt. |
boolean | TaskUmbilicalProtocol.ping(TaskAttemptID taskid) - Periodically called by the child to check whether the parent is still alive. |
void | TaskUmbilicalProtocol.reportDiagnosticInfo(TaskAttemptID taskid, String trace) - Report error messages back to the parent. |
protected void | Task.reportFatalError(TaskAttemptID id, Throwable throwable, String logMsg) - Report a fatal error to the parent (task) tracker. |
void | TaskUmbilicalProtocol.reportNextRecordRange(TaskAttemptID taskid, org.apache.hadoop.mapred.SortedRanges.Range range) - Report the record range that the Task will process next. |
void | TaskReport.setSuccessfulAttempt(TaskAttemptID t) - Set the successful attempt ID of the task. |
protected void | TaskCompletionEvent.setTaskAttemptId(TaskAttemptID taskId) - Sets the task attempt id. |
void | TaskCompletionEvent.setTaskID(TaskAttemptID taskId) - Deprecated. Use TaskCompletionEvent.setTaskAttemptId(TaskAttemptID) instead. |
void | TaskUmbilicalProtocol.shuffleError(TaskAttemptID taskId, String message) - Report that a reduce task couldn't shuffle map outputs. |
boolean | TaskUmbilicalProtocol.statusUpdate(TaskAttemptID taskId, TaskStatus taskStatus) - Report the child's progress to the parent. |
static void | TaskLog.syncLogs(String logLocation, TaskAttemptID taskid, boolean isCleanup) |
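Most of the TaskUmbilicalProtocol methods above are framework-internal, but RunningJob.getTaskDiagnostics and RunningJob.killTask are client-facing. A minimal sketch, assuming a reachable cluster and an attempt string passed as args[0]:

```java
import java.io.IOException;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapred.TaskAttemptID;

public class KillAttemptExample {
    public static void main(String[] args) throws IOException {
        // Assumes args[0] is an attempt string such as
        // attempt_200707121733_0003_m_000005_0 for a job known to the cluster.
        TaskAttemptID attempt = TaskAttemptID.forName(args[0]);

        JobClient client = new JobClient(new JobConf());
        RunningJob job = client.getJob(attempt.getJobID());
        if (job == null) {
            System.err.println("Job not found: " + attempt.getJobID());
            client.close();
            return;
        }

        // Print any diagnostics recorded for the attempt, then kill it.
        // Passing shouldFail=true instead would mark the attempt as failed
        // rather than killed.
        for (String line : job.getTaskDiagnostics(attempt)) {
            System.out.println(line);
        }
        job.killTask(attempt, false);
        client.close();
    }
}
```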
Modifier and Type | Method and Description |
---|---|
void | ReduceTask.setLocalMapFiles(Map<TaskAttemptID,MapOutputFile> mapFiles) - Register the set of mapper outputs created by a LocalJobRunner-based job with this ReduceTask so it knows where to fetch from. |
void | TaskReport.setRunningTaskAttempts(Collection<TaskAttemptID> runningAttempts) - Set the running attempt(s) of the task. |
Constructor and Description |
---|
MapTask(String jobFile, TaskAttemptID taskId, int partition, JobSplit.TaskSplitIndex splitIndex, int numSlotsRequired) |
Reader(TaskAttemptID taskid, TaskLog.LogName kind, long start, long end, boolean isCleanup) - Read a log file from start to end positions. |
ReduceTask(String jobFile, TaskAttemptID taskId, int partition, int numMaps, int numSlotsRequired) |
Task(String jobFile, TaskAttemptID taskId, int partition, int numSlotsRequired) |
TaskAttemptContextImpl(JobConf conf, TaskAttemptID taskid) |
TaskCompletionEvent(int eventId, TaskAttemptID taskId, int idWithinJob, boolean isMap, TaskCompletionEvent.Status status, String taskTrackerHttp) - Constructor. |
TaskStatus(TaskAttemptID taskid, float progress, int numSlots, TaskStatus.State runState, String diagnosticInfo, String stateString, String taskTracker, TaskStatus.Phase phase, Counters counters) |
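Of the constructors above, TaskAttemptContextImpl(JobConf, TaskAttemptID) is the one most often touched outside the framework, for example when exercising old-API code in tests; the class itself is intended mainly for framework-internal use. A minimal sketch, using a made-up attempt string:

```java
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TaskAttemptContextImpl;
import org.apache.hadoop.mapred.TaskAttemptID;

public class AttemptContextExample {
    public static void main(String[] args) {
        // Hypothetical attempt string; any well-formed attempt id works here.
        TaskAttemptID attempt =
            TaskAttemptID.forName("attempt_200707121733_0003_m_000005_0");

        // TaskAttemptContextImpl(JobConf, TaskAttemptID) wraps a configuration
        // and an attempt id into the context object handed to old-API tasks.
        JobConf conf = new JobConf();
        TaskAttemptContextImpl context = new TaskAttemptContextImpl(conf, attempt);

        // getTaskAttemptID() (listed in the first table) returns the same id.
        System.out.println(context.getTaskAttemptID());
    }
}
```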
Constructor and Description |
---|
Context(TaskAttemptID reduceId, JobConf jobConf, org.apache.hadoop.fs.FileSystem localFS, TaskUmbilicalProtocol umbilical, org.apache.hadoop.fs.LocalDirAllocator localDirAllocator, Reporter reporter, org.apache.hadoop.io.compress.CompressionCodec codec, Class<? extends Reducer> combinerClass, Task.CombineOutputCollector<K,V> combineCollector, Counters.Counter spilledRecordsCounter, Counters.Counter reduceCombineInputCounter, Counters.Counter shuffledMapsCounter, Counters.Counter reduceShuffleBytes, Counters.Counter failedShuffleCounter, Counters.Counter mergedMapOutputsCounter, TaskStatus status, org.apache.hadoop.util.Progress copyPhase, org.apache.hadoop.util.Progress mergePhase, Task reduceTask, MapOutputFile mapOutputFile, Map<TaskAttemptID,MapOutputFile> localMapFiles) |