@UriParams public class HdfsConfiguration extends Object
Constructor and Description |
---|
HdfsConfiguration() |
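The configuration is normally populated from the endpoint URI, but it can also be built by hand. A minimal sketch, assuming the class lives in the org.apache.camel.component.hdfs package of the camel-hdfs component:

```java
import java.net.URI;
import java.net.URISyntaxException;

// Package assumed from the camel-hdfs component; adjust for your Camel version.
import org.apache.camel.component.hdfs.HdfsConfiguration;

public class HdfsConfigurationExample {
    public static void main(String[] args) throws URISyntaxException {
        HdfsConfiguration config = new HdfsConfiguration();

        // parseURI picks up the host, port and path from the endpoint URI;
        // query options are normally bound by Camel onto the setters below.
        config.parseURI(new URI("hdfs://namenode:8020/user/camel/output"));

        // Remaining options go through plain setters.
        config.setOverwrite(true);
        config.setConnectOnStartup(false);

        // Validate the producer side of the configuration before use.
        config.checkProducerOptions();
    }
}
```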
Modifier and Type | Method and Description |
---|---|
void | checkConsumerOptions() |
void | checkProducerOptions() |
long | getBlockSize() |
int | getBufferSize() |
int | getCheckIdleInterval() |
int | getChunkSize() |
HdfsCompressionCodec | getCompressionCodec() |
org.apache.hadoop.io.SequenceFile.CompressionType | getCompressionType() |
HdfsFileSystemType | getFileSystemType() |
HdfsFileType | getFileType() |
String | getHostName() |
WritableType | getKeyType() |
String | getOpenedSuffix() |
String | getOwner() |
String | getPath() |
String | getPattern() |
int | getPort() |
String | getReadSuffix() |
short | getReplication() |
List<HdfsProducer.SplitStrategy> | getSplitStrategies() |
String | getSplitStrategy() |
URI | getUri() |
WritableType | getValueType() |
boolean | isAppend() |
boolean | isConnectOnStartup() |
boolean | isOverwrite() |
boolean | isWantAppend() |
void | parseURI(URI uri) |
void | setAppend(boolean append) - Append to existing file. |
void | setBlockSize(long blockSize) - The size of the HDFS blocks. |
void | setBufferSize(int bufferSize) - The buffer size used by HDFS. |
void | setCheckIdleInterval(int checkIdleInterval) - How often (time in millis) to run the idle checker background task. |
void | setChunkSize(int chunkSize) - When reading a normal file, it is split into chunks, producing one message per chunk. |
void | setCompressionCodec(HdfsCompressionCodec compressionCodec) - The compression codec to use. |
void | setCompressionType(org.apache.hadoop.io.SequenceFile.CompressionType compressionType) - The compression type to use (not used by default). |
void | setConnectOnStartup(boolean connectOnStartup) - Whether to connect to the HDFS file system on starting the producer/consumer. |
void | setFileSystemType(HdfsFileSystemType fileSystemType) - Set to LOCAL to not use HDFS but a local java.io.File instead. |
void | setFileType(HdfsFileType fileType) - The file type to use. |
void | setHostName(String hostName) - HDFS host to use. |
void | setKeyType(WritableType keyType) - The type for the key in case of sequence or map files. |
void | setOpenedSuffix(String openedSuffix) - When a file is opened for reading/writing, it is renamed with this suffix to avoid reading it during the writing phase. |
void | setOverwrite(boolean overwrite) - Whether to overwrite existing files with the same name. |
void | setOwner(String owner) - The file owner must match this owner for the consumer to pick up the file. |
void | setPath(String path) - The directory path to use. |
void | setPattern(String pattern) - The pattern used for scanning the directory. |
void | setPort(int port) - HDFS port to use. |
void | setReadSuffix(String readSuffix) - Once the file has been read, it is renamed with this suffix to avoid reading it again. |
void | setReplication(short replication) - The HDFS replication factor. |
void | setSplitStrategy(String splitStrategy) - In the current version of Hadoop, opening a file in append mode is disabled since it is not very reliable, so the producer instead creates a new file whenever one of the configured split conditions is met. |
void | setUri(URI uri) |
void | setValueType(WritableType valueType) - The type for the value in case of sequence or map files. |
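Since the class is annotated with @UriParams, each option above maps to a query parameter on the hdfs endpoint URI (the parameter name is the setter name with its first letter lowercased). A hedged route sketch; the hdfs scheme, the hypothetical host namenode and the enum constants SEQUENCE_FILE and TEXT are assumptions to be checked against your Camel version:

```java
import org.apache.camel.builder.RouteBuilder;

public class HdfsRoutes extends RouteBuilder {
    @Override
    public void configure() {
        // Consumer side: scan the input directory for files matching the pattern,
        // split normal files into 4096-byte chunks (one message per chunk) and
        // run the idle checker every 500 ms.
        from("hdfs://namenode:8020/user/camel/input"
                + "?pattern=*.txt&chunkSize=4096&checkIdleInterval=500&connectOnStartup=false")
            // Producer side: write a sequence file of Text values, overwriting
            // an existing file with the same name.
            .to("hdfs://namenode:8020/user/camel/output"
                + "?fileType=SEQUENCE_FILE&valueType=TEXT&overwrite=true");
    }
}
```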
public void checkConsumerOptions()
public void checkProducerOptions()
public void parseURI(URI uri) throws URISyntaxException
Throws: URISyntaxException
public URI getUri()
public void setUri(URI uri)
public String getHostName()
public void setHostName(String hostName)
public int getPort()
public void setPort(int port)
public String getPath()
public void setPath(String path)
public boolean isOverwrite()
public void setOverwrite(boolean overwrite)
public boolean isAppend()
public boolean isWantAppend()
public void setAppend(boolean append)
public int getBufferSize()
public void setBufferSize(int bufferSize)
public short getReplication()
public void setReplication(short replication)
public long getBlockSize()
public void setBlockSize(long blockSize)
public HdfsFileType getFileType()
public void setFileType(HdfsFileType fileType)
public org.apache.hadoop.io.SequenceFile.CompressionType getCompressionType()
public void setCompressionType(org.apache.hadoop.io.SequenceFile.CompressionType compressionType)
public HdfsCompressionCodec getCompressionCodec()
public void setCompressionCodec(HdfsCompressionCodec compressionCodec)
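A sketch of configuring a compressed sequence file writer; HdfsFileType.SEQUENCE_FILE, HdfsCompressionCodec.DEFAULT and the package names are assumptions, while SequenceFile.CompressionType is Hadoop's own enum:

```java
import org.apache.hadoop.io.SequenceFile;
import org.apache.camel.component.hdfs.HdfsCompressionCodec;  // package assumed
import org.apache.camel.component.hdfs.HdfsConfiguration;     // package assumed
import org.apache.camel.component.hdfs.HdfsFileType;          // package assumed

public class CompressedSequenceFileConfig {
    public static HdfsConfiguration create() {
        HdfsConfiguration config = new HdfsConfiguration();
        config.setFileType(HdfsFileType.SEQUENCE_FILE);
        // CompressionType is Hadoop's own enum: NONE, RECORD or BLOCK.
        config.setCompressionType(SequenceFile.CompressionType.BLOCK);
        // Codec constant assumed; check the HdfsCompressionCodec enum in your version.
        config.setCompressionCodec(HdfsCompressionCodec.DEFAULT);
        return config;
    }
}
```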
public void setFileSystemType(HdfsFileSystemType fileSystemType)
public HdfsFileSystemType getFileSystemType()
public WritableType getKeyType()
public void setKeyType(WritableType keyType)
public WritableType getValueType()
public void setValueType(WritableType valueType)
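For sequence and map files the key and value writable types are declared up front. A sketch, assuming WritableType exposes a TEXT constant and HdfsFileType a MAP_FILE constant:

```java
import org.apache.camel.component.hdfs.HdfsConfiguration;  // package assumed
import org.apache.camel.component.hdfs.HdfsFileType;       // package assumed
import org.apache.camel.component.hdfs.WritableType;       // package assumed

public class MapFileTypesConfig {
    public static HdfsConfiguration create() {
        HdfsConfiguration config = new HdfsConfiguration();
        // Enum constants assumed; verify them against your version.
        config.setFileType(HdfsFileType.MAP_FILE);
        config.setKeyType(WritableType.TEXT);
        config.setValueType(WritableType.TEXT);
        return config;
    }
}
```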
public void setOpenedSuffix(String openedSuffix)
public String getOpenedSuffix()
public void setReadSuffix(String readSuffix)
public String getReadSuffix()
public void setPattern(String pattern)
public String getPattern()
public void setChunkSize(int chunkSize)
public int getChunkSize()
public void setCheckIdleInterval(int checkIdleInterval)
public int getCheckIdleInterval()
public List<HdfsProducer.SplitStrategy> getSplitStrategies()
public String getSplitStrategy()
public void setSplitStrategy(String splitStrategy)
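Because append is unreliable, the producer rolls over to a new file whenever a split condition is met. A sketch of setting the strategy string, whose TYPE:value,TYPE:value format and BYTES/MESSAGES/IDLE strategy names are taken from the camel-hdfs documentation and should be verified for your version:

```java
import org.apache.camel.component.hdfs.HdfsConfiguration;  // package assumed

public class SplitStrategyConfig {
    public static HdfsConfiguration create() {
        HdfsConfiguration config = new HdfsConfiguration();
        // Roll over to a new file after 5 MB written, 1000 messages, or 60 s of idle time,
        // whichever comes first.
        config.setSplitStrategy("BYTES:5242880,MESSAGES:1000,IDLE:60000");
        return config;
    }
}
```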
public boolean isConnectOnStartup()
public void setConnectOnStartup(boolean connectOnStartup)
public String getOwner()
public void setOwner(String owner)