public class CqlStorage extends AbstractCassandraStorage
Nested classes/interfaces inherited from class AbstractCassandraStorage: AbstractCassandraStorage.MarshallerType
Fields inherited from class AbstractCassandraStorage: column_family, conf, DEFAULT_INPUT_FORMAT, DEFAULT_OUTPUT_FORMAT, inputFormatClass, keyspace, loadSignature, outputFormatClass, PARTITION_FILTER_SIGNATURE, partitionerClass, password, PIG_INITIAL_ADDRESS, PIG_INPUT_FORMAT, PIG_INPUT_INITIAL_ADDRESS, PIG_INPUT_PARTITIONER, PIG_INPUT_RPC_PORT, PIG_INPUT_SPLIT_SIZE, PIG_OUTPUT_FORMAT, PIG_OUTPUT_INITIAL_ADDRESS, PIG_OUTPUT_PARTITIONER, PIG_OUTPUT_RPC_PORT, PIG_PARTITIONER, PIG_RPC_PORT, splitSize, storeSignature, usePartitionFilter, username
Constructor and Description |
---|
CqlStorage() |
CqlStorage(int pageSize) |
Modifier and Type | Method and Description |
---|---|
protected java.util.List<org.apache.cassandra.thrift.ColumnDef> | getColumnMetadata(org.apache.cassandra.thrift.Cassandra.Client client, boolean cql3Table): include key columns |
org.apache.pig.data.Tuple | getNext(): get the next row |
org.apache.pig.ResourceSchema | getSchema(java.lang.String location, org.apache.hadoop.mapreduce.Job job): schema is (value, value, value) where the key columns come first |
void | prepareToRead(org.apache.hadoop.mapreduce.RecordReader reader, org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigSplit split) |
void | prepareToWrite(org.apache.hadoop.mapreduce.RecordWriter writer) |
void | putNext(org.apache.pig.data.Tuple t): output is (((name, value), (name, value)), (value ... |
void | setLocation(java.lang.String location, org.apache.hadoop.mapreduce.Job job): set read configuration settings |
void | setPartitionFilter(org.apache.pig.Expression partitionFilter) |
void | setStoreLocation(java.lang.String location, org.apache.hadoop.mapreduce.Job job): set store configuration settings |
Methods inherited from class AbstractCassandraStorage: cfdefFromString, cfdefToString, checkSchema, cleanupOnFailure, columnToTuple, composeComposite, getCfDef, getCfDef, getColumnMeta, getDefaultMarshallers, getFullyQualifiedClassName, getIndexes, getIndexType, getInputFormat, getKeysMeta, getOutputFormat, getPartitionKeys, getPigType, getQueryMap, getStatistics, getValidatorMap, initSchema, objToBB, parseType, relativeToAbsolutePath, relToAbsPathForStoreLocation, setConnectionInformation, setStoreFuncUDFContextSignature, setTupleValue, setUDFContextSignature
public CqlStorage()

public CqlStorage(int pageSize)
Parameters:
pageSize - limit the number of CQL rows to fetch in a Thrift request
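A minimal construction sketch, assuming the class lives in the org.apache.cassandra.hadoop.pig package (not stated on this page); the page size value is arbitrary:

```java
import org.apache.cassandra.hadoop.pig.CqlStorage;

public class ConstructSketch {
    public static void main(String[] args) {
        // Default paging
        CqlStorage storage = new CqlStorage();

        // Cap each Thrift request at 1000 CQL rows
        CqlStorage pagedStorage = new CqlStorage(1000);
    }
}
```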
public void prepareToRead(org.apache.hadoop.mapreduce.RecordReader reader, org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigSplit split)
Specified by: prepareToRead in class org.apache.pig.LoadFunc
public org.apache.pig.data.Tuple getNext() throws java.io.IOException
Specified by: getNext in class org.apache.pig.LoadFunc
Throws: java.io.IOException
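A minimal read-loop sketch; it assumes prepareToRead has already been called (normally by the Pig runtime) and relies only on the standard LoadFunc contract that getNext returns null once the split is exhausted. The CqlStorage package and the example column names are assumptions.

```java
import java.io.IOException;
import org.apache.cassandra.hadoop.pig.CqlStorage;
import org.apache.pig.data.Tuple;

public class ReadLoopSketch {
    // Prints every remaining row from a loader that is already prepared for reading.
    static void dumpRows(CqlStorage storage) throws IOException {
        Tuple row;
        while ((row = storage.getNext()) != null) {
            // Per getSchema(), key columns come first, followed by the other
            // column values, e.g. (id, name, age) for a hypothetical table
            // whose primary key is "id".
            System.out.println(row);
        }
    }
}
```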
public void setLocation(java.lang.String location, org.apache.hadoop.mapreduce.Job job) throws java.io.IOException
Specified by: setLocation in class org.apache.pig.LoadFunc
Throws: java.io.IOException
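A minimal read-side sketch. It assumes the class lives in org.apache.cassandra.hadoop.pig and that the location is a cql:// URL naming the keyspace and column family, optionally with query parameters such as page_size; the keyspace and table names are hypothetical. In practice Pig calls this method itself, and connection settings (initial address, RPC port, partitioner) are supplied through the PIG_* properties listed in the inherited field summary.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.cassandra.hadoop.pig.CqlStorage;

public class SetLocationSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical keyspace "ks" and column family "users"; page_size is
        // assumed to control how many CQL rows are fetched per request.
        String location = "cql://ks/users?page_size=500";

        Job job = new Job(new Configuration());
        CqlStorage storage = new CqlStorage();
        storage.setLocation(location, job);  // populates the job's read settings
    }
}
```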
public void setStoreLocation(java.lang.String location, org.apache.hadoop.mapreduce.Job job) throws java.io.IOException
Throws: java.io.IOException
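A minimal write-side sketch, analogous to the read case. The cql:// store URL form and the output_query parameter (a prepared CQL statement whose bound values come from putNext) are assumptions about typical CqlStorage usage; the keyspace, table, and query are hypothetical.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.cassandra.hadoop.pig.CqlStorage;

public class SetStoreLocationSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical keyspace "ks" and table "users"; output_query is assumed
        // to carry the URL-encoded prepared statement that tuple values bind to.
        String location = "cql://ks/users"
                + "?output_query=UPDATE+ks.users+SET+name+%3D+%3F";

        Job job = new Job(new Configuration());
        CqlStorage storage = new CqlStorage();
        storage.setStoreLocation(location, job);  // populates the job's write settings
    }
}
```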
public org.apache.pig.ResourceSchema getSchema(java.lang.String location, org.apache.hadoop.mapreduce.Job job) throws java.io.IOException
Throws: java.io.IOException
public void setPartitionFilter(org.apache.pig.Expression partitionFilter)
public void prepareToWrite(org.apache.hadoop.mapreduce.RecordWriter writer)
public void putNext(org.apache.pig.data.Tuple t) throws java.io.IOException
Throws: java.io.IOException
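A minimal sketch of building a tuple in the shape putNext expects: a leading tuple of (name, value) pairs for the key columns, followed by the remaining values in order (the documented shape is truncated above, so the trailing part is an assumption). It assumes the store side has already been prepared (setStoreLocation and prepareToWrite called by the Pig runtime); the column names and values are hypothetical.

```java
import java.io.IOException;
import java.util.Arrays;
import org.apache.cassandra.hadoop.pig.CqlStorage;
import org.apache.pig.data.Tuple;
import org.apache.pig.data.TupleFactory;

public class WriteTupleSketch {
    // Writes a single row through a storage instance that is ready for output.
    static void writeRow(CqlStorage storage) throws IOException {
        TupleFactory tf = TupleFactory.getInstance();

        // Key part: one (name, value) pair per key column (hypothetical columns).
        Tuple idKey = tf.newTuple(Arrays.<Object>asList("id", 1));
        Tuple bucketKey = tf.newTuple(Arrays.<Object>asList("bucket", "2013-07"));
        Tuple keys = tf.newTuple(Arrays.<Object>asList(idKey, bucketKey));

        // Full tuple: the key tuple first, then the values to bind, in order.
        Tuple row = tf.newTuple();
        row.append(keys);
        row.append("some text value");
        row.append(42L);

        storage.putNext(row);
    }
}
```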
protected java.util.List<org.apache.cassandra.thrift.ColumnDef> getColumnMetadata(org.apache.cassandra.thrift.Cassandra.Client client, boolean cql3Table) throws org.apache.cassandra.thrift.InvalidRequestException, org.apache.cassandra.thrift.UnavailableException, org.apache.cassandra.thrift.TimedOutException, org.apache.cassandra.thrift.SchemaDisagreementException, org.apache.thrift.TException, java.nio.charset.CharacterCodingException
Specified by: getColumnMetadata in class AbstractCassandraStorage
Throws:
org.apache.cassandra.thrift.InvalidRequestException
org.apache.cassandra.thrift.UnavailableException
org.apache.cassandra.thrift.TimedOutException
org.apache.cassandra.thrift.SchemaDisagreementException
org.apache.thrift.TException
java.nio.charset.CharacterCodingException
Copyright © 2013 The Apache Software Foundation