One machine in our cluster has a regionserver service that keeps dying. The references I found say this happens when an HDFS file has been modified, but I never modified anything. Here is the log:
2014-01-06 09:02:45,551 WARN org.apache.hadoop.hdfs.DFSClient: Failed recovery attempt #4 from primary datanode 192.168.1.78:50010
org.apache.hadoop.ipc.RemoteException: org.apache.hadoop.ipc.RemoteException: java.io.IOException: blk_4093330436295980481_117353 has out of date GS 117353 found 117416, may already be committed
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.nextGenerationStampForBlock(FSNamesystem.java:5943)
at org.apache.hadoop.hdfs.server.namenode.NameNode.nextGenerationStamp(NameNode.java:835)
at sun.reflect.GeneratedMethodAccessor28.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:587)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1432)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1428)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:396)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1190)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:1426)
at org.apache.hadoop.ipc.Client.call(Client.java:1113)
at org.apache.hadoop.ipc.RPC$Invoker.invoke(RPC.java:229)
at com.sun.proxy.$Proxy5.nextGenerationStamp(Unknown Source)
at org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock(DataNode.java:2127)
at org.apache.hadoop.hdfs.server.datanode.DataNode.recoverBlock(DataNode.java:2095)
at org.apache.hadoop.hdfs.server.datanode.DataNode.recoverBlock(DataNode.java:2175)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:587)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1432)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1428)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:396)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1190)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:1426)
at org.apache.hadoop.ipc.Client.call(Client.java:1113)
at org.apache.hadoop.ipc.RPC$Invoker.invoke(RPC.java:229)
at com.sun.proxy.$Proxy14.recoverBlock(Unknown Source)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.processDatanodeError(DFSClient.java:3319)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.access$2200(DFSClient.java:2783)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream$DataStreamer.run(DFSClient.java:2987)
2014-01-06 09:02:45,551 WARN org.apache.hadoop.hdfs.DFSClient: Error Recovery for block blk_4093330436295980481_117353 failed because recovery from primary datanode 192.168.1.78:50010 failed 5 times. Pipeline was 192.168.1.78:50010, 192.168.1.80:50010. Will retry...
2014-01-06 09:02:46,562 WARN org.apache.hadoop.hdfs.DFSClient: Failed recovery attempt #5 from primary datanode 192.168.1.78:50010
org.apache.hadoop.ipc.RemoteException: org.apache.hadoop.ipc.RemoteException: java.io.IOException: blk_4093330436295980481_117353 has out of date GS 117353 found 117416, may already be committed
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.nextGenerationStampForBlock(FSNamesystem.java:5943)
at org.apache.hadoop.hdfs.server.namenode.NameNode.nextGenerationStamp(NameNode.java:835)
at sun.reflect.GeneratedMethodAccessor28.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:587)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1432)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1428)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:396)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1190)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:1426)
at org.apache.hadoop.ipc.Client.call(Client.java:1113)
at org.apache.hadoop.ipc.RPC$Invoker.invoke(RPC.java:229)
at com.sun.proxy.$Proxy5.nextGenerationStamp(Unknown Source)
at org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock(DataNode.java:2127)
at org.apache.hadoop.hdfs.server.datanode.DataNode.recoverBlock(DataNode.java:2095)
at org.apache.hadoop.hdfs.server.datanode.DataNode.recoverBlock(DataNode.java:2175)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:587)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1432)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1428)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:396)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1190)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:1426)
at org.apache.hadoop.ipc.Client.call(Client.java:1113)
at org.apache.hadoop.ipc.RPC$Invoker.invoke(RPC.java:229)
at com.sun.proxy.$Proxy14.recoverBlock(Unknown Source)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.processDatanodeError(DFSClient.java:3319)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.access$2200(DFSClient.java:2783)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream$DataStreamer.run(DFSClient.java:2987)
2014-01-06 09:02:46,562 WARN org.apache.hadoop.hdfs.DFSClient: Error Recovery for block blk_4093330436295980481_117353 failed because recovery from primary datanode 192.168.1.78:50010 failed 6 times. Pipeline was 192.168.1.78:50010, 192.168.1.80:50010. Marking primary datanode as bad.
2014-01-06 09:02:47,569 WARN org.apache.hadoop.hdfs.DFSClient: Failed recovery attempt #0 from primary datanode 192.168.1.80:50010
org.apache.hadoop.ipc.RemoteException: org.apache.hadoop.ipc.RemoteException: java.io.IOException: blk_4093330436295980481_117353 has out of date GS 117353 found 117416, may already be committed
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.nextGenerationStampForBlock(FSNamesystem.java:5943)
at org.apache.hadoop.hdfs.server.namenode.NameNode.nextGenerationStamp(NameNode.java:835)
at sun.reflect.GeneratedMethodAccessor28.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:587)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1432)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1428)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:396)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1190)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:1426)
at org.apache.hadoop.ipc.Client.call(Client.java:1113)
at org.apache.hadoop.ipc.RPC$Invoker.invoke(RPC.java:229)
at com.sun.proxy.$Proxy5.nextGenerationStamp(Unknown Source)
at org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock(DataNode.java:2127)
at org.apache.hadoop.hdfs.server.datanode.DataNode.recoverBlock(DataNode.java:2095)
at org.apache.hadoop.hdfs.server.datanode.DataNode.recoverBlock(DataNode.java:2175)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:587)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1432)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1428)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:396)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1190)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:1426)
at org.apache.hadoop.ipc.Client.call(Client.java:1113)
at org.apache.hadoop.ipc.RPC$Invoker.invoke(RPC.java:229)
at com.sun.proxy.$Proxy14.recoverBlock(Unknown Source)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.processDatanodeError(DFSClient.java:3319)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.access$2200(DFSClient.java:2783)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream$DataStreamer.run(DFSClient.java:2987)
2014-01-06 09:02:50,596 WARN org.apache.hadoop.hdfs.DFSClient: Error Recovery for block blk_4093330436295980481_117353 failed because recovery from primary datanode 192.168.1.80:50010 failed 4 times. Pipeline was 192.168.1.80:50010. Will retry...
2014-01-06 09:02:51,603 WARN org.apache.hadoop.hdfs.DFSClient: Failed recovery attempt #4 from primary datanode 192.168.1.80:50010
org.apache.hadoop.ipc.RemoteException: org.apache.hadoop.ipc.RemoteException: java.io.IOException: blk_4093330436295980481_117353 has out of date GS 117353 found 117416, may already be committed
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.nextGenerationStampForBlock(FSNamesystem.java:5943)
at org.apache.hadoop.hdfs.server.namenode.NameNode.nextGenerationStamp(NameNode.java:835)
at sun.reflect.GeneratedMethodAccessor28.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:587)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1432)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1428)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:396)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1190)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:1426)
at org.apache.hadoop.ipc.Client.call(Client.java:1113)
at org.apache.hadoop.ipc.RPC$Invoker.invoke(RPC.java:229)
at com.sun.proxy.$Proxy5.nextGenerationStamp(Unknown Source)
at org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock(DataNode.java:2127)
at org.apache.hadoop.hdfs.server.datanode.DataNode.recoverBlock(DataNode.java:2095)
at org.apache.hadoop.hdfs.server.datanode.DataNode.recoverBlock(DataNode.java:2175)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:587)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1432)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1428)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:396)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1190)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:1426)
at org.apache.hadoop.ipc.Client.call(Client.java:1113)
at org.apache.hadoop.ipc.RPC$Invoker.invoke(RPC.java:229)
at com.sun.proxy.$Proxy14.recoverBlock(Unknown Source)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.processDatanodeError(DFSClient.java:3319)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.access$2200(DFSClient.java:2783)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream$DataStreamer.run(DFSClient.java:2987)
2014-01-06 09:02:51,604 WARN org.apache.hadoop.hdfs.DFSClient: Error Recovery for block blk_4093330436295980481_117353 failed because recovery from primary datanode 192.168.1.80:50010 failed 5 times. Pipeline was 192.168.1.80:50010. Will retry...
2014-01-06 09:02:53,126 WARN org.apache.hadoop.hdfs.DFSClient: Failed recovery attempt #5 from primary datanode 192.168.1.80:50010
org.apache.hadoop.ipc.RemoteException: org.apache.hadoop.ipc.RemoteException: java.io.IOException: blk_4093330436295980481_117353 has out of date GS 117353 found 117416, may already be committed
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.nextGenerationStampForBlock(FSNamesystem.java:5943)
at org.apache.hadoop.hdfs.server.namenode.NameNode.nextGenerationStamp(NameNode.java:835)
at sun.reflect.GeneratedMethodAccessor28.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:587)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1432)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1428)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:396)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1190)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:1426)
at org.apache.hadoop.ipc.Client.call(Client.java:1113)
at org.apache.hadoop.ipc.RPC$Invoker.invoke(RPC.java:229)
at com.sun.proxy.$Proxy5.nextGenerationStamp(Unknown Source)
at org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock(DataNode.java:2127)
at org.apache.hadoop.hdfs.server.datanode.DataNode.recoverBlock(DataNode.java:2095)
at org.apache.hadoop.hdfs.server.datanode.DataNode.recoverBlock(DataNode.java:2175)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:587)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1432)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1428)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:396)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1190)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:1426)
at org.apache.hadoop.ipc.Client.call(Client.java:1113)
at org.apache.hadoop.ipc.RPC$Invoker.invoke(RPC.java:229)
at com.sun.proxy.$Proxy14.recoverBlock(Unknown Source)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.processDatanodeError(DFSClient.java:3319)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.access$2200(DFSClient.java:2783)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream$DataStreamer.run(DFSClient.java:2987)
2014-01-06 09:02:53,127 WARN org.apache.hadoop.hdfs.DFSClient: Error Recovery for block blk_4093330436295980481_117353 failed because recovery from primary datanode 192.168.1.80:50010 failed 6 times. Pipeline was 192.168.1.80:50010. Aborting...
2014-01-06 09:02:53,127 ERROR org.apache.hadoop.hbase.regionserver.HRegionServer: Close and delete failed
java.io.IOException: Error Recovery for block blk_4093330436295980481_117353 failed because recovery from primary datanode 192.168.1.80:50010 failed 6 times. Pipeline was 192.168.1.80:50010. Aborting...
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.processDatanodeError(DFSClient.java:3355)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.access$2200(DFSClient.java:2783)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream$DataStreamer.run(DFSClient.java:2987)
2014-01-06 09:02:53,128 INFO org.apache.hadoop.hbase.regionserver.Leases: regionserver60020 closing leases
2014-01-06 09:02:53,128 INFO org.apache.hadoop.hbase.regionserver.Leases: regionserver60020 closed leases
2014-01-06 09:02:53,128 DEBUG org.apache.hadoop.hbase.regionserver.CompactSplitThread: Waiting for Split Thread to finish...
2014-01-06 09:02:53,128 DEBUG org.apache.hadoop.hbase.regionserver.CompactSplitThread: Waiting for Large Compaction Thread to finish...
2014-01-06 09:02:53,128 DEBUG org.apache.hadoop.hbase.regionserver.CompactSplitThread: Waiting for Small Compaction Thread to finish...
2014-01-06 09:02:53,131 WARN org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper: Possibly transient ZooKeeper exception: org.apache.zookeeper.KeeperException$SessionExpiredException: KeeperErrorCode = Session expired for /hbase/rs/DN78,60020,1388931449604
2014-01-06 09:02:53,131 INFO org.apache.hadoop.hbase.util.RetryCounter: Sleeping 2000ms before retry #1...
2014-01-06 09:02:55,131 WARN org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper: Possibly transient ZooKeeper exception: org.apache.zookeeper.KeeperException$SessionExpiredException: KeeperErrorCode = Session expired for /hbase/rs/DN78,60020,1388931449604
2014-01-06 09:02:55,131 INFO org.apache.hadoop.hbase.util.RetryCounter: Sleeping 4000ms before retry #2...
2014-01-06 09:02:56,952 INFO org.apache.hadoop.hbase.regionserver.Leases: regionserver60020.leaseChecker closing leases
2014-01-06 09:02:56,952 INFO org.apache.hadoop.hbase.regionserver.Leases: regionserver60020.leaseChecker closed leases
2014-01-06 09:02:59,131 WARN org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper: Possibly transient ZooKeeper exception: org.apache.zookeeper.KeeperException$SessionExpiredException: KeeperErrorCode = Session expired for /hbase/rs/DN78,60020,1388931449604
2014-01-06 09:02:59,132 INFO org.apache.hadoop.hbase.util.RetryCounter: Sleeping 8000ms before retry #3...
2014-01-06 09:03:07,132 WARN org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper: Possibly transient ZooKeeper exception: org.apache.zookeeper.KeeperException$SessionExpiredException: KeeperErrorCode = Session expired for /hbase/rs/DN78,60020,1388931449604
2014-01-06 09:03:07,132 ERROR org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper: ZooKeeper delete failed after 3 retries
2014-01-06 09:03:07,132 WARN org.apache.hadoop.hbase.regionserver.HRegionServer: Failed deleting my ephemeral node
org.apache.zookeeper.KeeperException$SessionExpiredException: KeeperErrorCode = Session expired for /hbase/rs/DN78,60020,1388931449604
at org.apache.zookeeper.KeeperException.create(KeeperException.java:127)
at org.apache.zookeeper.KeeperException.create(KeeperException.java:51)
at org.apache.zookeeper.ZooKeeper.delete(ZooKeeper.java:873)
at org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper.delete(RecoverableZooKeeper.java:133)
at org.apache.hadoop.hbase.zookeeper.ZKUtil.deleteNode(ZKUtil.java:1195)
at org.apache.hadoop.hbase.zookeeper.ZKUtil.deleteNode(ZKUtil.java:1184)
at org.apache.hadoop.hbase.regionserver.HRegionServer.deleteMyEphemeralNode(HRegionServer.java:1133)
at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:900)
at java.lang.Thread.run(Thread.java:662)
2014-01-06 09:03:07,133 INFO org.apache.hadoop.hbase.regionserver.HRegionServer: stopping server DN78,60020,1388931449604; zookeeper connection closed.
2014-01-06 09:03:07,133 INFO org.apache.hadoop.hbase.regionserver.HRegionServer: regionserver60020 exiting
2014-01-06 09:03:07,134 INFO org.apache.hadoop.hbase.regionserver.ShutdownHook: Shutdown hook starting; hbase.shutdown.hook=true; fsShutdownHook=Thread[Thread-5,5,main]
2014-01-06 09:03:07,134 INFO org.apache.hadoop.hbase.regionserver.HRegionServer: STOPPED: Shutdown hook
2014-01-06 09:03:07,135 INFO org.apache.hadoop.hbase.regionserver.ShutdownHook: Starting fs shutdown hook thread.
2014-01-06 09:03:07,135 ERROR org.apache.hadoop.hdfs.DFSClient: Failed to close file /hbase/.logs/DN78,60020,1388931449604/DN78%2C60020%2C1388931449604.1388969000971
java.io.IOException: Error Recovery for block blk_4093330436295980481_117353 failed because recovery from primary datanode 192.168.1.80:50010 failed 6 times. Pipeline was 192.168.1.80:50010. Aborting...
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.processDatanodeError(DFSClient.java:3355)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.access$2200(DFSClient.java:2783)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream$DataStreamer.run(DFSClient.java:2987)
2014-01-06 09:03:07,136 INFO org.apache.hadoop.hbase.regionserver.ShutdownHook: Shutdown hook finished.
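
The file in the "Failed to close" error above is this region server's WAL under /hbase/.logs. In case it helps with diagnosis, the block state of that file can be dumped with fsck (Hadoop 1.x CLI; the path is copied verbatim from the log, and the flags are standard fsck options):

hadoop fsck /hbase/.logs/DN78,60020,1388931449604/DN78%2C60020%2C1388931449604.1388969000971 -files -blocks -locations
hadoop fsck /hbase/.logs -openforwrite

The first command prints each block of the failed WAL with its replica locations (the _NNNNNN suffix on a block ID is its generation stamp, the value the error says is out of date: GS 117353 vs 117416). The second lists any WAL files still open for write. Both are read-only metadata queries against the NameNode, so they are safe to run on a live cluster.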