There are many Java code examples for parquet.hadoop.ParquetWriter. Here are a few:

Example 1: closing a list of open ParquetWriter instances:
[mw_shl_code=java,true]private boolean closeFile() {
  boolean retval = false;
  if ( data.parquetWriters != null ) {
    Iterator<ParquetWriter> openFiles = data.parquetWriters.iterator();
    while ( openFiles.hasNext() ) {
      ParquetWriter writer = openFiles.next();
      if ( writer != null ) {
        try {
          writer.close();
        } catch ( Exception e ) {
          logBasic( "Error trying to close file. This may not be a problem." );
          logDetailed( "Stack trace from error trying to close file:", e );
        }
      }
    }
    if ( log.isDebug() ) {
      logDebug( "Closed all open parquet writers." );
    }
    retval = true; // all writers closed successfully
  }
  return retval;
}
[/mw_shl_code]
Example 2: the constructor that initializes the list of open files and their ParquetWriters:
[mw_shl_code=java,true]public ParquetOutputData() {
  super();
  daf = new SimpleDateFormat();
  dafs = new DateFormatSymbols();
  defaultDateFormat = new SimpleDateFormat();
  defaultDateFormatSymbols = new DateFormatSymbols();
  openFiles = new ArrayList<String>();
  parquetWriters = new ArrayList<ParquetWriter>();
}
[/mw_shl_code]
Example 3: opening an AvroParquetWriter, falling back to an uncompressed file when the Snappy native library is not loaded:
[mw_shl_code=java,true]@Override
public void open() {
  Preconditions.checkState(state.equals(ReaderWriterState.NEW),
      "Unable to open a writer from state:%s", state);
  logger.debug(
      "Opening data file with pathTmp:{} (final path will be path:{})",
      pathTmp, path);
  try {
    CompressionCodecName codecName = CompressionCodecName.UNCOMPRESSED;
    if (enableCompression) {
      if (SnappyCodec.isNativeCodeLoaded()) {
        codecName = CompressionCodecName.SNAPPY;
      } else {
        logger.warn("Compression enabled, but Snappy native code not loaded. " +
            "Parquet file will not be compressed.");
      }
    }
    avroParquetWriter = new AvroParquetWriter<E>(fileSystem.makeQualified(pathTmp),
        schema, codecName, DEFAULT_BLOCK_SIZE,
        ParquetWriter.DEFAULT_PAGE_SIZE);
  } catch (IOException e) {
    throw new DatasetWriterException("Unable to create writer to path:" + pathTmp, e);
  }
  state = ReaderWriterState.OPEN;
}[/mw_shl_code]
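
The three snippets above are fragments of larger classes. As a point of reference, here is a minimal, self-contained sketch that writes a single record using the same constructor-style API as Example 3. The class name, output path, and Avro schema are made up for illustration, and the pre-Apache parquet.* package names are assumed to match the parquet.hadoop.ParquetWriter referenced above (current releases use org.apache.parquet.* and a builder API instead).

[mw_shl_code=java,true]import java.io.IOException;

import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.hadoop.fs.Path;

import parquet.avro.AvroParquetWriter;
import parquet.hadoop.ParquetWriter;
import parquet.hadoop.metadata.CompressionCodecName;

public class ParquetWriteSketch {
  public static void main(String[] args) throws IOException {
    // Avro schema with a single string field (made up for this sketch).
    Schema schema = new Schema.Parser().parse(
        "{\"type\":\"record\",\"name\":\"Line\",\"fields\":"
            + "[{\"name\":\"text\",\"type\":\"string\"}]}");

    Path file = new Path("/tmp/example.parquet"); // hypothetical output path

    // Same constructor-style API as Example 3.
    ParquetWriter<GenericRecord> writer = new AvroParquetWriter<GenericRecord>(
        file, schema, CompressionCodecName.SNAPPY,
        ParquetWriter.DEFAULT_BLOCK_SIZE, ParquetWriter.DEFAULT_PAGE_SIZE);
    try {
      GenericRecord record = new GenericData.Record(schema);
      record.put("text", "hello parquet");
      writer.write(record);
    } finally {
      writer.close(); // close() flushes the row group and writes the footer
    }
  }
}
[/mw_shl_code]

Note that writer.close() is what flushes the last row group and writes the Parquet footer, which is why Example 1 takes care to close every open writer before finishing.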
More:
http://www.aboutyun.com/home.php ... do=blog&id=3078