Hadoop programming: how do you use the API to get overall HDFS information?

Cast the FileSystem returned by FileSystem.get() to DistributedFileSystem; its status and datanode-report methods expose the same figures that hadoop dfsadmin -report prints. For example:
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

public class hdfstest01 {
    public static void main(String[] args) throws Exception {
        Configuration myConf = new Configuration();
        // fs.default.name is deprecated; fs.defaultFS is the current key
        myConf.set("fs.defaultFS", "hdfs://master.nj.hadoop:9000");
        // For an hdfs:// URI, FileSystem.get() returns a DistributedFileSystem
        DistributedFileSystem hdfs = (DistributedFileSystem) FileSystem.get(myConf);

        // Cluster-wide totals, all in bytes
        FsStatus status = hdfs.getStatus();
        long capacity = status.getCapacity();
        long used = status.getUsed();
        long remaining = status.getRemaining();
        System.out.println("Configured Capacity (bytes): " + capacity);
        System.out.println("DFS Used (bytes): " + used);
        System.out.println("DFS Remaining (bytes): " + remaining);
        System.out.println("DFS Used%: " + (100.0 * used / capacity) + "%");
        System.out.println("DFS Remaining%: " + (100.0 * remaining / capacity) + "%");
        System.out.println("Number of Under-Replicated Blocks: " + hdfs.getUnderReplicatedBlocksCount());
        System.out.println("-----------------------------------");

        // Per-node information; the cluster totals above can also be
        // accumulated from these DatanodeInfo objects (see below)
        DatanodeInfo[] dataNodeStats = hdfs.getDataNodeStats();
        for (DatanodeInfo dataNode : dataNodeStats) {
            System.out.println("-----------------------------------");
            System.out.println(dataNode.getHostName() + "\t" + dataNode.getName() + "\t"
                    + dataNode.getAdminState() + "\t" + dataNode.getDatanodeReport());
        }
    }
}
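
As the comment in the loop notes, the cluster-wide totals can also be reconstructed by summing the per-datanode figures. A minimal sketch, assuming the hdfs handle and dataNodeStats array from the example above are in scope (DatanodeInfo exposes getCapacity(), getDfsUsed(), and getRemaining() per node):

// Rebuild the cluster totals by accumulating per-node stats.
long totalCapacity = 0, totalUsed = 0, totalRemaining = 0;
for (DatanodeInfo node : dataNodeStats) {
    totalCapacity  += node.getCapacity();   // raw capacity of this datanode, bytes
    totalUsed      += node.getDfsUsed();    // bytes used by DFS on this node
    totalRemaining += node.getRemaining();  // bytes still free on this node
}
System.out.println("Sum of node capacities (bytes): " + totalCapacity);
System.out.println("Sum of node DFS used (bytes): " + totalUsed);
System.out.println("Sum of node DFS remaining (bytes): " + totalRemaining);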