启动 Flume 运行一段时间后就会报下面这个异常(日志与堆栈如下),请问该如何处理?
(org.apache.flume.sink.hdfs.BucketWriter$4.call:387) - Closing file: hdfs://xxxxxx.
20160127-15.1453879548009.tmp failed. Will retry again in 180 seconds.
java.nio.channels.ClosedChannelException
at org.apache.hadoop.hdfs.DFSOutputStream.checkClosed(DFSOutputStream.java:1622)
at org.apache.hadoop.hdfs.DFSOutputStream.flushOrSync(DFSOutputStream.java:1986)
at org.apache.hadoop.hdfs.DFSOutputStream.hflush(DFSOutputStream.java:1946)
at org.apache.hadoop.hdfs.DFSOutputStream.sync(DFSOutputStream.java:1931)
at org.apache.hadoop.fs.FSDataOutputStream.sync(FSDataOutputStream.java:123)
at org.apache.flume.sink.hdfs.HDFSDataStream.close(HDFSDataStream.java:139)
at org.apache.flume.sink.hdfs.BucketWriter$3.call(BucketWriter.java:341)
at org.apache.flume.sink.hdfs.BucketWriter$3.call(BucketWriter.java:335)
at org.apache.flume.sink.hdfs.BucketWriter$9$1.run(BucketWriter.java:718)
at org.apache.flume.sink.hdfs.BucketWriter.runPrivileged(BucketWriter.java:183)
at org.apache.flume.sink.hdfs.BucketWriter.access$1700(BucketWriter.java:59)
at org.apache.flume.sink.hdfs.BucketWriter$9.call(BucketWriter.java:715)
at java.util.concurrent.FutureTask.run(FutureTask.java:262)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:724)