HDFS全分布datanode异常
我按照手册用两台机器部署全分布,上传文件时出错:[*]# hadoop dfs -put core-site.xml /test/[*]13/03/14 16:24:24 WARN hdfs.DFSClient: DataStreamer Exception: org.apache.hadoop.ipc.RemoteException: java.io.IOException: File /test/core-site.xml could only be replicated to 0 nodes, instead of 1
[*] at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:1639)
[*] at org.apache.hadoop.hdfs.server.namenode.NameNode.addBlock(NameNode.java:729)
[*] at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
[*] at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
[*] at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
[*] at java.lang.reflect.Method.invoke(Method.java:597)
[*] at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:578)
[*] at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1393)
[*] at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1389)
[*] at java.security.AccessController.doPrivileged(Native Method)
[*] at javax.security.auth.Subject.doAs(Subject.java:396)
[*] at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1136)
[*] at org.apache.hadoop.ipc.Server$Handler.run(Server.java:1387)
[*]
[*] at org.apache.hadoop.ipc.Client.call(Client.java:1107)
[*] at org.apache.hadoop.ipc.RPC$Invoker.invoke(RPC.java:229)
[*] at $Proxy1.addBlock(Unknown Source)
[*] at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
[*] at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
[*] at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
[*] at java.lang.reflect.Method.invoke(Method.java:597)
[*] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:85)
[*] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:62)
[*] at $Proxy1.addBlock(Unknown Source)
[*] at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.locateFollowingBlock(DFSClient.java:3686)
[*] at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.nextBlockOutputStream(DFSClient.java:3546)
[*] at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.access$2600(DFSClient.java:2749)
[*] at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream$DataStreamer.run(DFSClient.java:2989)
[*]
[*]13/03/14 16:24:24 WARN hdfs.DFSClient: Error Recovery for block null bad datanode nodes == null
[*]13/03/14 16:24:24 WARN hdfs.DFSClient: Could not get block locations. Source file "/test/core-site.xml" - Aborting...
[*]put: java.io.IOException: File /test/core-site.xml could only be replicated to 0 nodes, instead of 1
[*]13/03/14 16:24:24 ERROR hdfs.DFSClient: Failed to close file /test/core-site.xml
[*]org.apache.hadoop.ipc.RemoteException: java.io.IOException: File /test/core-site.xml could only be replicated to 0 nodes, instead of 1
[*] at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:1639)
[*] at org.apache.hadoop.hdfs.server.namenode.NameNode.addBlock(NameNode.java:729)
[*] at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
[*] at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
[*] at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
[*] at java.lang.reflect.Method.invoke(Method.java:597)
[*] at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:578)
[*] at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1393)
[*] at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1389)
[*] at java.security.AccessController.doPrivileged(Native Method)
[*] at javax.security.auth.Subject.doAs(Subject.java:396)
[*] at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1136)
[*] at org.apache.hadoop.ipc.Server$Handler.run(Server.java:1387)
[*]
[*] at org.apache.hadoop.ipc.Client.call(Client.java:1107)
[*] at org.apache.hadoop.ipc.RPC$Invoker.invoke(RPC.java:229)
[*] at $Proxy1.addBlock(Unknown Source)
[*] at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
[*] at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
[*] at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
[*] at java.lang.reflect.Method.invoke(Method.java:597)
[*] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:85)
[*] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:62)
[*] at $Proxy1.addBlock(Unknown Source)
[*] at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.locateFollowingBlock(DFSClient.java:3686)
[*] at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.nextBlockOutputStream(DFSClient.java:3546)
[*] at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.access$2600(DFSClient.java:2749)
[*] at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream$DataStreamer.run(DFSClient.java:2989)复制代码
查看文件系统状态:
[*]# hadoop dfsadmin -report
[*]Configured Capacity: 53664616448 (49.98 GB)
[*]Present Capacity: 28672 (28 KB)
[*]DFS Remaining: 0 (0 KB)
[*]DFS Used: 28672 (28 KB)
[*]DFS Used%: 100%
[*]Under replicated blocks: 0
[*]Blocks with corrupt replicas: 0
[*]Missing blocks: 0
[*]
[*]-------------------------------------------------
[*]Datanodes available: 1 (1 total, 0 dead)
[*]
[*]Name: 192.168.1.49:50010
[*]Decommission Status : Normal
[*]Configured Capacity: 53664616448 (49.98 GB)
[*]DFS Used: 28672 (28 KB)
[*]Non DFS Used: 53664587776 (49.98 GB)
[*]DFS Remaining: 0(0 KB)
[*]DFS Used%: 0%
[*]DFS Remaining%: 0%
[*]Last contact: Thu Mar 14 16:50:24 CST 2013复制代码
这个状态很奇怪,datanode能识别,磁盘状态是正常的,但是文件系统容量不符合逻辑,请问这是什么原因呢? 把文件的 replication 设置成 1 试试,默认副本数是 3,而你只有一台 datanode。
try 一下吧。 看你 dfsadmin -report 的结果,最直观的感觉是这个 datanode 上没有剩余空间了,都被非 HDFS 的文件占用完了。 以前遇到过,貌似是 datanode 不正常启动导致的,通过重新启动所有节点处理,可以试试。
页:
[1]