public class CassandraStorage extends AbstractCassandraStorage
Nested classes/interfaces inherited from class AbstractCassandraStorage: AbstractCassandraStorage.CfInfo, AbstractCassandraStorage.MarshallerType

Field Summary

| Modifier and Type | Field and Description |
|---|---|
| static java.lang.String | PIG_ALLOW_DELETES |
| static java.lang.String | PIG_USE_SECONDARY |
| static java.lang.String | PIG_WIDEROW_INPUT |
Fields inherited from class AbstractCassandraStorage: column_family, conf, DEFAULT_INPUT_FORMAT, DEFAULT_OUTPUT_FORMAT, initHostAddress, inputFormatClass, keyspace, loadSignature, outputFormatClass, PARTITION_FILTER_SIGNATURE, partitionerClass, password, PIG_INITIAL_ADDRESS, PIG_INPUT_FORMAT, PIG_INPUT_INITIAL_ADDRESS, PIG_INPUT_PARTITIONER, PIG_INPUT_RPC_PORT, PIG_INPUT_SPLIT_SIZE, PIG_OUTPUT_FORMAT, PIG_OUTPUT_INITIAL_ADDRESS, PIG_OUTPUT_PARTITIONER, PIG_OUTPUT_RPC_PORT, PIG_PARTITIONER, PIG_RPC_PORT, rpcPort, splitSize, storeSignature, usePartitionFilter, username

Constructor Summary

| Constructor and Description |
|---|
| CassandraStorage() |
| CassandraStorage(int limit) |
Method Summary

| Modifier and Type | Method and Description |
|---|---|
| protected java.util.List<ColumnDef> | getColumnMetadata(Cassandra.Client client): get the list of columns for the column family |
| int | getLimit() |
| org.apache.pig.data.Tuple | getNext() |
| org.apache.pig.data.Tuple | getNextWide(): read a wide row |
| org.apache.pig.ResourceSchema | getSchema(java.lang.String location, org.apache.hadoop.mapreduce.Job job): define the schema |
| java.nio.ByteBuffer | nullToBB() |
| void | prepareToRead(org.apache.hadoop.mapreduce.RecordReader reader, org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigSplit split) |
| void | prepareToWrite(org.apache.hadoop.mapreduce.RecordWriter writer): prepare the writer |
| void | putNext(org.apache.pig.data.Tuple t): write the next row |
| protected void | setConnectionInformation(): set the Hadoop Cassandra connection settings |
| void | setLocation(java.lang.String location, org.apache.hadoop.mapreduce.Job job): set the read configuration settings |
| void | setPartitionFilter(org.apache.pig.Expression partitionFilter): set the partition filter |
| void | setStoreLocation(java.lang.String location, org.apache.hadoop.mapreduce.Job job): set the store configuration settings |
Methods inherited from class AbstractCassandraStorage: cassandraToObj, cfdefFromString, cfdefToString, checkSchema, cleanupOnFailure, columnToTuple, composeComposite, getCfDefinition, getCfInfo, getCfInfo, getColumnMeta, getDefaultMarshallers, getFullyQualifiedClassName, getIndexes, getIndexType, getInputFormat, getOutputFormat, getPartitionKeys, getPigType, getQueryMap, getStatistics, getValidatorMap, initSchema, objToBB, parseType, relativeToAbsolutePath, relToAbsPathForStoreLocation, setStoreFuncUDFContextSignature, setTupleValue, setUDFContextSignature

Field Detail

public static final java.lang.String PIG_ALLOW_DELETES

public static final java.lang.String PIG_WIDEROW_INPUT

public static final java.lang.String PIG_USE_SECONDARY
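The three constants above are plain String option names. Judging by the method list, PIG_WIDEROW_INPUT relates to the wide-row read path (getNextWide()), PIG_USE_SECONDARY to the partition-filter/secondary-index path, and PIG_ALLOW_DELETES to whether deletes may be issued on store. The sketch below only probes the constants from a driver program; the assumption that setConnectionInformation() resolves them from the process environment is not confirmed by this page.

```java
// Hypothetical probe of the CassandraStorage option names. Reading them from
// the process environment is an assumption, not something this page states;
// verify against setConnectionInformation() in the source before relying on it.
import org.apache.cassandra.hadoop.pig.CassandraStorage;

public class StorageOptionProbe
{
    public static void main(String[] args)
    {
        String[] options = { CassandraStorage.PIG_ALLOW_DELETES,
                             CassandraStorage.PIG_WIDEROW_INPUT,
                             CassandraStorage.PIG_USE_SECONDARY };
        for (String option : options)
        {
            String value = System.getenv(option);   // assumed lookup mechanism
            System.out.println(option + " = " + (value == null ? "<unset>" : value));
        }
    }
}
```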
Constructor Detail

public CassandraStorage()

public CassandraStorage(int limit)
Parameters: limit - number of columns to fetch in a slice
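As a small illustration of the two constructors, the sketch below builds one instance with the defaults and one with an explicit slice limit, then reads the limit back with getLimit(). The value 1024 is arbitrary and only for illustration.

```java
// Minimal sketch of the two constructors listed above; the limit of 1024
// columns per slice is an arbitrary illustrative value.
import org.apache.cassandra.hadoop.pig.CassandraStorage;

public class ConstructorExample
{
    public static void main(String[] args)
    {
        CassandraStorage defaults = new CassandraStorage();      // default slice limit
        CassandraStorage limited  = new CassandraStorage(1024);  // fetch at most 1024 columns per slice

        System.out.println("explicit slice limit: " + limited.getLimit());
    }
}
```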
Method Detail

public int getLimit()

public void prepareToRead(org.apache.hadoop.mapreduce.RecordReader reader,
                          org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigSplit split)
Specified by: prepareToRead in class org.apache.pig.LoadFunc

public org.apache.pig.data.Tuple getNextWide() throws java.io.IOException
Throws: java.io.IOException

public org.apache.pig.data.Tuple getNext() throws java.io.IOException
Specified by: getNext in class org.apache.pig.LoadFunc
Throws: java.io.IOException

protected void setConnectionInformation() throws java.io.IOException
Overrides: setConnectionInformation in class AbstractCassandraStorage
Throws: java.io.IOException

public void setLocation(java.lang.String location, org.apache.hadoop.mapreduce.Job job) throws java.io.IOException
Specified by: setLocation in class org.apache.pig.LoadFunc
Throws: java.io.IOException

public void setStoreLocation(java.lang.String location, org.apache.hadoop.mapreduce.Job job) throws java.io.IOException
Throws: java.io.IOException

public org.apache.pig.ResourceSchema getSchema(java.lang.String location, org.apache.hadoop.mapreduce.Job job) throws java.io.IOException
Throws: java.io.IOException

public void setPartitionFilter(org.apache.pig.Expression partitionFilter) throws java.io.IOException
Throws: java.io.IOException

public void prepareToWrite(org.apache.hadoop.mapreduce.RecordWriter writer)

public void putNext(org.apache.pig.data.Tuple t) throws java.io.IOException
Throws: java.io.IOException

protected java.util.List<ColumnDef> getColumnMetadata(Cassandra.Client client) throws org.apache.thrift.TException, java.nio.charset.CharacterCodingException, InvalidRequestException, ConfigurationException
Overrides: getColumnMetadata in class AbstractCassandraStorage
Throws: org.apache.thrift.TException, java.nio.charset.CharacterCodingException, InvalidRequestException, ConfigurationException

public java.nio.ByteBuffer nullToBB()
Overrides: nullToBB in class AbstractCassandraStorage
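CassandraStorage is normally driven from a Pig Latin script rather than called directly; Pig's runtime invokes setLocation()/prepareToRead()/getNext() on the load side and setStoreLocation()/prepareToWrite()/putNext() on the store side. The embedded-PigServer sketch below is only an illustration: the cassandra://<keyspace>/<column_family> location format and the MyKeyspace/MyColumnFamily names are assumptions not stated on this page.

```java
// Illustrative sketch only: runs a Pig Latin script that loads and stores
// through CassandraStorage via an embedded PigServer. The keyspace/column
// family names and the cassandra:// location format are assumptions.
import org.apache.pig.ExecType;
import org.apache.pig.PigServer;

public class CassandraStoragePigExample
{
    public static void main(String[] args) throws Exception
    {
        PigServer pig = new PigServer(ExecType.LOCAL);

        // Read rows: each tuple is produced by CassandraStorage.getNext()
        // (or getNextWide() when wide-row input is enabled).
        pig.registerQuery("rows = LOAD 'cassandra://MyKeyspace/MyColumnFamily' "
                        + "USING org.apache.cassandra.hadoop.pig.CassandraStorage();");

        // Write tuples back: CassandraStorage.putNext() handles each output row.
        pig.registerQuery("STORE rows INTO 'cassandra://MyKeyspace/MyOtherColumnFamily' "
                        + "USING org.apache.cassandra.hadoop.pig.CassandraStorage();");
    }
}
```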