public class CqlStorage extends AbstractCassandraStorage
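A minimal usage sketch may help orient readers before the member tables. It drives CqlStorage through Pig's Java API; the keyspace, table name, and cql:// URI syntax are illustrative assumptions rather than something documented on this page, and a real run also needs the Cassandra and Thrift jars on the classpath plus a reachable cluster.

```java
import java.util.Iterator;

import org.apache.pig.ExecType;
import org.apache.pig.PigServer;
import org.apache.pig.data.Tuple;

public class CqlStorageUsageSketch {
    public static void main(String[] args) throws Exception {
        // Local Pig runtime; a real run also needs the connection settings
        // implied by the inherited PIG_* fields listed further down this page.
        PigServer pig = new PigServer(ExecType.LOCAL);

        // Hypothetical keyspace "ks" and table "cf"; the exact cql:// URI
        // syntax is an assumption, not part of this API documentation.
        pig.registerQuery(
            "rows = LOAD 'cql://ks/cf' USING org.apache.cassandra.hadoop.pig.CqlStorage();");

        // Pull the loaded rows back as Pig tuples and print them.
        Iterator<Tuple> it = pig.openIterator("rows");
        while (it.hasNext()) {
            System.out.println(it.next());
        }
    }
}
```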
Nested classes inherited from class AbstractCassandraStorage:
AbstractCassandraStorage.CfInfo, AbstractCassandraStorage.MarshallerType

Field Summary

| Modifier and Type | Field and Description |
|---|---|
| protected java.lang.String | columns |
| protected java.lang.String | outputQuery |
| protected int | pageSize |
| protected java.lang.String | whereClause |
| protected org.apache.hadoop.mapreduce.RecordWriter<java.util.Map<java.lang.String,java.nio.ByteBuffer>,java.util.List<java.nio.ByteBuffer>> | writer |
Fields inherited from class AbstractCassandraStorage:
column_family, conf, DEFAULT_INPUT_FORMAT, DEFAULT_OUTPUT_FORMAT, initHostAddress, inputFormatClass, keyspace, loadSignature, outputFormatClass, PARTITION_FILTER_SIGNATURE, partitionerClass, password, PIG_INITIAL_ADDRESS, PIG_INPUT_FORMAT, PIG_INPUT_INITIAL_ADDRESS, PIG_INPUT_PARTITIONER, PIG_INPUT_RPC_PORT, PIG_INPUT_SPLIT_SIZE, PIG_OUTPUT_FORMAT, PIG_OUTPUT_INITIAL_ADDRESS, PIG_OUTPUT_PARTITIONER, PIG_OUTPUT_RPC_PORT, PIG_PARTITIONER, PIG_RPC_PORT, rpcPort, splitSize, storeSignature, usePartitionFilter, username

Constructor Summary

| Constructor and Description |
|---|
| CqlStorage() |
| CqlStorage(int pageSize) |

Method Summary

| Modifier and Type | Method and Description |
|---|---|
| protected java.lang.Object | cqlColumnToObj(Column col, CfDef cfDef): convert a CQL column to an object |
| protected java.util.List<ColumnDef> | getColumnMetadata(Cassandra.Client client): include key columns |
| protected java.util.List<ColumnDef> | getKeysMeta(Cassandra.Client client): get keys metadata |
| org.apache.pig.data.Tuple | getNext(): get the next row |
| org.apache.pig.ResourceSchema | getSchema(java.lang.String location, org.apache.hadoop.mapreduce.Job job): schema is (value, value, value) with key columns in front |
| java.nio.ByteBuffer | nullToBB(): the Thrift API can't handle null, so an empty byte array is used |
| void | prepareToRead(org.apache.hadoop.mapreduce.RecordReader reader, org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigSplit split) |
| void | prepareToWrite(org.apache.hadoop.mapreduce.RecordWriter writer) |
| void | putNext(org.apache.pig.data.Tuple t): output is (((name, value), (name, value)), (value ... |
| void | setLocation(java.lang.String location, org.apache.hadoop.mapreduce.Job job): set read configuration settings |
| void | setPartitionFilter(org.apache.pig.Expression partitionFilter) |
| void | setStoreLocation(java.lang.String location, org.apache.hadoop.mapreduce.Job job): set store configuration settings |
| protected void | setTupleValue(org.apache.pig.data.Tuple tuple, int position, java.lang.Object value, AbstractType<?> validator): set the value at the given position of the tuple |
Methods inherited from class AbstractCassandraStorage:
cassandraToObj, cfdefFromString, cfdefToString, checkSchema, cleanupOnFailure, columnToTuple, composeComposite, getCfDefinition, getCfInfo, getCfInfo, getColumnMeta, getDefaultMarshallers, getFullyQualifiedClassName, getIndexes, getIndexType, getInputFormat, getOutputFormat, getPartitionKeys, getPigType, getQueryMap, getStatistics, getValidatorMap, initSchema, objToBB, parseType, relativeToAbsolutePath, relToAbsPathForStoreLocation, setConnectionInformation, setStoreFuncUDFContextSignature, setTupleValue, setUDFContextSignature

Field Detail

protected org.apache.hadoop.mapreduce.RecordWriter<java.util.Map<java.lang.String,java.nio.ByteBuffer>,java.util.List<java.nio.ByteBuffer>> writer
protected int pageSize
protected java.lang.String columns
protected java.lang.String outputQuery
protected java.lang.String whereClause
Constructor Detail

public CqlStorage()

public CqlStorage(int pageSize)
Parameters:
pageSize - limit number of CQL rows to fetch in a thrift request
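As a small illustration of the paging constructor (assuming the class lives in the org.apache.cassandra.hadoop.pig package, as in the Cassandra source tree; the value 1000 is arbitrary, not a recommended default):

```java
import org.apache.cassandra.hadoop.pig.CqlStorage;

public class PageSizeExample {
    public static void main(String[] args) {
        // Default page size chosen by the class itself.
        CqlStorage defaultPaging = new CqlStorage();

        // Fetch at most 1000 CQL rows per Thrift request (illustrative value).
        CqlStorage explicitPaging = new CqlStorage(1000);

        System.out.println(defaultPaging + " / " + explicitPaging);
    }
}
```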
Method Detail

public void prepareToRead(org.apache.hadoop.mapreduce.RecordReader reader,
                          org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigSplit split)
Specified by: prepareToRead in class org.apache.pig.LoadFunc

public org.apache.pig.data.Tuple getNext()
                                 throws java.io.IOException
get next row
Specified by: getNext in class org.apache.pig.LoadFunc
Throws: java.io.IOException

protected void setTupleValue(org.apache.pig.data.Tuple tuple,
                             int position,
                             java.lang.Object value,
                             AbstractType<?> validator)
                      throws org.apache.pig.backend.executionengine.ExecException
set the value at the given position of the tuple
Throws: org.apache.pig.backend.executionengine.ExecException

protected java.lang.Object cqlColumnToObj(Column col,
                                          CfDef cfDef)
                                   throws java.io.IOException
convert a CQL column to an object
Throws: java.io.IOException

public void setLocation(java.lang.String location,
                        org.apache.hadoop.mapreduce.Job job)
                 throws java.io.IOException
set read configuration settings
Specified by: setLocation in class org.apache.pig.LoadFunc
Throws: java.io.IOException
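A hedged sketch of wiring setLocation into a Hadoop Job follows. The cql:// location string and its page_size parameter are assumptions for illustration, and the call presumes the connection settings implied by the inherited PIG_INITIAL_ADDRESS, PIG_RPC_PORT, and PIG_PARTITIONER fields are available in the environment.

```java
import org.apache.cassandra.hadoop.pig.CqlStorage;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class SetLocationSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical location URI; keyspace "ks", table "cf" and the
        // page_size query parameter are illustrative assumptions.
        String location = "cql://ks/cf?page_size=500";

        Job job = Job.getInstance(new Configuration());

        CqlStorage storage = new CqlStorage();
        storage.setUDFContextSignature("readSignature"); // normally assigned by Pig
        storage.setLocation(location, job);              // populates the job's read settings
    }
}
```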
public void setStoreLocation(java.lang.String location,
                             org.apache.hadoop.mapreduce.Job job)
                      throws java.io.IOException
set store configuration settings
Throws: java.io.IOException

public org.apache.pig.ResourceSchema getSchema(java.lang.String location,
                                               org.apache.hadoop.mapreduce.Job job)
                                        throws java.io.IOException
schema: (value, value, value) where keys are in the front.
Throws: java.io.IOException
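One way to inspect the ResourceSchema that getSchema reports (key columns first) is to walk its fields with Pig's standard accessors, as in this sketch; it assumes a schema already obtained from a configured, reachable instance.

```java
import org.apache.pig.ResourceSchema;
import org.apache.pig.ResourceSchema.ResourceFieldSchema;
import org.apache.pig.data.DataType;

public class SchemaInspection {
    // Dumps the fields of a ResourceSchema, e.g. one returned by
    // CqlStorage.getSchema(location, job); key columns come first.
    public static void dump(ResourceSchema schema) {
        for (ResourceFieldSchema field : schema.getFields()) {
            System.out.println(field.getName() + " : "
                    + DataType.findTypeName(field.getType()));
        }
    }
}
```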
public void setPartitionFilter(org.apache.pig.Expression partitionFilter)
                        throws java.io.IOException
Throws: java.io.IOException

public void prepareToWrite(org.apache.hadoop.mapreduce.RecordWriter writer)

public void putNext(org.apache.pig.data.Tuple t)
             throws java.io.IOException
output: (((name, value), (name, value)), (value ...
Throws: java.io.IOException
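To make the documented tuple layout concrete, the sketch below builds a tuple whose first field is an inner tuple of (name, value) key pairs and whose remaining fields are the values bound to the output query; the column name "userid" and the numbers are invented for illustration.

```java
import org.apache.pig.backend.executionengine.ExecException;
import org.apache.pig.data.Tuple;
import org.apache.pig.data.TupleFactory;

public class PutNextTupleShape {
    // Builds a tuple of the shape putNext(Tuple) describes:
    //   (((name, value), (name, value)), value, ...)
    // i.e. a leading tuple of key (name, value) pairs, then the values
    // bound to the placeholders of the configured output query.
    public static Tuple example() throws ExecException {
        TupleFactory tf = TupleFactory.getInstance();

        // One (name, value) pair for a key column; the name is illustrative only.
        Tuple keyPair = tf.newTuple(2);
        keyPair.set(0, "userid");   // key column name
        keyPair.set(1, 42);         // key column value

        Tuple keys = tf.newTuple(1);
        keys.set(0, keyPair);

        // Whole output tuple: key pairs first, then one bound value.
        Tuple out = tf.newTuple(2);
        out.set(0, keys);
        out.set(1, 99);             // value for a single output-query placeholder
        return out;
    }
}
```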
protected java.util.List<ColumnDef> getColumnMetadata(Cassandra.Client client)
                                               throws InvalidRequestException,
                                                      UnavailableException,
                                                      TimedOutException,
                                                      SchemaDisagreementException,
                                                      org.apache.thrift.TException,
                                                      java.nio.charset.CharacterCodingException,
                                                      ConfigurationException,
                                                      NotFoundException
include key columns
Specified by: getColumnMetadata in class AbstractCassandraStorage
Throws: InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, org.apache.thrift.TException, java.nio.charset.CharacterCodingException, ConfigurationException, NotFoundException

protected java.util.List<ColumnDef> getKeysMeta(Cassandra.Client client)
                                         throws java.lang.Exception
get keys metadata
Throws: java.lang.Exception

public java.nio.ByteBuffer nullToBB()
Thrift API can't handle null, so use empty byte array
Specified by: nullToBB in class AbstractCassandraStorage
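Because Thrift cannot carry nulls, a null value has to travel as an empty buffer. The stand-alone helper below mirrors that documented behaviour; it is a sketch, not the class's actual implementation.

```java
import java.nio.ByteBuffer;

public class NullPlaceholder {
    // Empty ByteBuffer used wherever a null value would otherwise be sent,
    // because the Thrift API cannot handle null.
    public static ByteBuffer nullToBB() {
        return ByteBuffer.wrap(new byte[0]);
    }

    public static void main(String[] args) {
        System.out.println(nullToBB().remaining()); // prints 0: an empty, non-null buffer
    }
}
```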