public class EsHiveInputFormat extends EsInputFormat<org.apache.hadoop.io.Text,java.util.Map<org.apache.hadoop.io.Writable,org.apache.hadoop.io.Writable>>
FileInputFormat
Nested classes/interfaces inherited from class EsInputFormat: EsInputFormat.ShardInputSplit, EsInputFormat.ShardRecordReader<K,V>, EsInputFormat.WritableShardRecordReader
| Constructor and Description |
|---|
EsHiveInputFormat() |
| Modifier and Type | Method and Description |
|---|---|
EsInputFormat.WritableShardRecordReader |
getRecordReader(org.apache.hadoop.mapred.InputSplit split,
org.apache.hadoop.mapred.JobConf job,
org.apache.hadoop.mapred.Reporter reporter) |
org.apache.hadoop.mapred.FileSplit[] |
getSplits(org.apache.hadoop.mapred.JobConf job,
int numSplits) |
createRecordReader, getSplits
public org.apache.hadoop.mapred.FileSplit[] getSplits(org.apache.hadoop.mapred.JobConf job, int numSplits) throws java.io.IOException
getSplits
in interface org.apache.hadoop.mapred.InputFormat<org.apache.hadoop.io.Text,java.util.Map<org.apache.hadoop.io.Writable,org.apache.hadoop.io.Writable>>
getSplits
in class EsInputFormat<org.apache.hadoop.io.Text,java.util.Map<org.apache.hadoop.io.Writable,org.apache.hadoop.io.Writable>>
java.io.IOException
public EsInputFormat.WritableShardRecordReader getRecordReader(org.apache.hadoop.mapred.InputSplit split, org.apache.hadoop.mapred.JobConf job, org.apache.hadoop.mapred.Reporter reporter)
getRecordReader
in interface org.apache.hadoop.mapred.InputFormat<org.apache.hadoop.io.Text,java.util.Map<org.apache.hadoop.io.Writable,org.apache.hadoop.io.Writable>>
getRecordReader
in class EsInputFormat<org.apache.hadoop.io.Text,java.util.Map<org.apache.hadoop.io.Writable,org.apache.hadoop.io.Writable>>