public class BigQueryMapredInputFormat
extends java.lang.Object
implements org.apache.hadoop.mapred.InputFormat<org.apache.hadoop.io.LongWritable,com.google.gson.JsonObject>
Constructor and Description |
---|
BigQueryMapredInputFormat() |
Modifier and Type | Method and Description |
---|---|
org.apache.hadoop.mapred.RecordReader<org.apache.hadoop.io.LongWritable,com.google.gson.JsonObject> |
getRecordReader(org.apache.hadoop.mapred.InputSplit inputSplit,
org.apache.hadoop.mapred.JobConf conf,
org.apache.hadoop.mapred.Reporter reporter)
Get a RecordReader by calling through to
AbstractBigQueryInputFormat.createRecordReader(org.apache.hadoop.mapreduce.InputSplit, org.apache.hadoop.mapreduce.TaskAttemptContext). |
org.apache.hadoop.mapred.InputSplit[] |
getSplits(org.apache.hadoop.mapred.JobConf job,
int numSplits)
|
public org.apache.hadoop.mapred.InputSplit[] getSplits(org.apache.hadoop.mapred.JobConf job, int numSplits) throws java.io.IOException
Specified by:
getSplits in interface org.apache.hadoop.mapred.InputFormat<org.apache.hadoop.io.LongWritable,com.google.gson.JsonObject>
Parameters:
job - The config passed to us from the streaming package.
numSplits - We ignore this parameter.
Throws:
java.io.IOException
public org.apache.hadoop.mapred.RecordReader<org.apache.hadoop.io.LongWritable,com.google.gson.JsonObject> getRecordReader(org.apache.hadoop.mapred.InputSplit inputSplit, org.apache.hadoop.mapred.JobConf conf, org.apache.hadoop.mapred.Reporter reporter) throws java.io.IOException
Get a RecordReader by calling through to
AbstractBigQueryInputFormat.createRecordReader(org.apache.hadoop.mapreduce.InputSplit, org.apache.hadoop.mapreduce.TaskAttemptContext).
Specified by:
getRecordReader in interface org.apache.hadoop.mapred.InputFormat<org.apache.hadoop.io.LongWritable,com.google.gson.JsonObject>
Throws:
java.io.IOException
Copyright © 2018. All rights reserved.