Reading HBase data to HDFS via the MapReduce API throws an error I can't find anywhere online. Please help. Error: java.lang.NoSuchFieldError...
Error message: Exception in thread "main" java.lang.NoSuchFieldError: DEFAULT_MR_AM_ADMIN_USER_ENV

Driver class:
package com.zzti.sq.day1101shenmegui.wtf;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class ReadHBaseDataMR {

    private static final String ZK_CONNECT_KEY = "hbase.zookeeper.quorum";
    private static final String ZK_CONNECT_VALUE = "master:2181";
    private static final String TABLE = "user_info";

    public static void main(String[] args) throws Exception {
        System.setProperty("hadoop.home.dir", "/Users/songqi/RuntimeEnvironment/hadoop-2.7.6HA");
//      System.setProperty("HBASE_HOME", "/Users/songqi/RuntimeEnvironment/hbase-1.2.6");

        Configuration conf = HBaseConfiguration.create();
        conf.set(ZK_CONNECT_KEY, ZK_CONNECT_VALUE);

        Job job = Job.getInstance(conf);
        job.setJarByClass(ReadHBaseDataMR.class);
//      job.setJar("/Users/songqi/Desktop/wtf.jar");

        Scan scan = new Scan();
        TableMapReduceUtil.initTableMapperJob(
                TABLE,
                scan,
                ReadHbaseDataMR_Mapper.class,
                Text.class,
                NullWritable.class,
                job,
                false);  // addDependencyJars = false

        job.setNumReduceTasks(0);
        FileOutputFormat.setOutputPath(job, new Path("/hbase_mr/result07"));

        boolean isDone = job.waitForCompletion(true);
        System.out.println(isDone);
    }
}

// For reference: class TableMapper<KEYOUT, VALUEOUT>
//     extends Mapper<ImmutableBytesWritable, Result, KEYOUT, VALUEOUT>
Mapper class:
package com.zzti.sq.day1101shenmegui.wtf;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
public class ReadHbaseDataMR_Mapper extends TableMapper<Text, NullWritable> {

    // Reused across map() calls to avoid a per-record allocation.
    private final Text outKey = new Text();

    @Override
    protected void map(ImmutableBytesWritable key, Result result, Context context)
            throws IOException, InterruptedException {
        // Each Result holds all the cells of one row; emit one output line per cell.
        List<Cell> cells = result.listCells();
        for (Cell c : cells) {
            String rowkey = Bytes.toString(CellUtil.cloneRow(c));
            String family = Bytes.toString(CellUtil.cloneFamily(c));
            String column = Bytes.toString(CellUtil.cloneQualifier(c));
            String value = Bytes.toString(CellUtil.cloneValue(c));
            long timestamp = c.getTimestamp();
            outKey.set(rowkey + "\t" + family + "\t" + column + "\t" + value + "\t" + timestamp);
            context.write(outKey, NullWritable.get());
        }
    }
}
The error output follows; this is all there is. The full console output is too long to post here. It is mostly lines like the ones below, with the actual error appearing only at the very end:
DEBUG - Call: getFileInfo took 2ms
DEBUG - Creating setup context, jobSubmitDir url is scheme: "hdfs" host: "sq" port: 8020 file: "/tmp/hadoop-yarn/staging/songqi/.staging/job_1541197362274_0007"
DEBUG - IPC Client (225290371) connection to master/10.37.129.100:9000 from songqi sending #22
DEBUG - IPC Client (225290371) connection to master/10.37.129.100:9000 from songqi got value #22
DEBUG - Call: getFileInfo took 2ms
DEBUG - IPC Client (225290371) connection to master/10.37.129.100:9000 from songqi sending #23
DEBUG - IPC Client (225290371) connection to master/10.37.129.100:9000 from songqi got value #23
DEBUG - Call: getFileInfo took 2ms
 INFO - Job jar is not present. Not adding any jar to the list of resources.
DEBUG - IPC Client (225290371) connection to master/10.37.129.100:9000 from songqi sending #24
DEBUG - IPC Client (225290371) connection to master/10.37.129.100:9000 from songqi got value #24
DEBUG - Call: getFileInfo took 2ms
DEBUG - IPC Client (225290371) connection to master/10.37.129.100:9000 from songqi sending #25
DEBUG - IPC Client (225290371) connection to master/10.37.129.100:9000 from songqi got value #25
DEBUG - Call: getFileInfo took 2ms
DEBUG - IPC Client (225290371) connection to master/10.37.129.100:9000 from songqi sending #26
DEBUG - IPC Client (225290371) connection to master/10.37.129.100:9000 from songqi got value #26
DEBUG - Call: getFileInfo took 2ms
DEBUG - IPC Client (225290371) connection to master/10.37.129.100:9000 from songqi sending #27
DEBUG - IPC Client (225290371) connection to master/10.37.129.100:9000 from songqi got value #27
DEBUG - Call: getFileInfo took 2ms
DEBUG - Command to launch container for ApplicationMaster is : $JAVA_HOME/bin/java -Djava.io.tmpdir=$PWD/tmp -Dlog4j.configuration=container-log4j.properties -Dyarn.app.container.log.dir=<LOG_DIR> -Dyarn.app.container.log.filesize=0 -Dhadoop.root.logger=INFO,CLA -Dhadoop.root.logfile=syslog -Xmx1024m org.apache.hadoop.mapreduce.v2.app.MRAppMaster 1><LOG_DIR>/stdout 2><LOG_DIR>/stderr
 INFO - Cleaning up the staging area /tmp/hadoop-yarn/staging/songqi/.staging/job_1541197362274_0007
DEBUG - IPC Client (225290371) connection to master/10.37.129.100:9000 from songqi sending #28
DEBUG - IPC Client (225290371) connection to master/10.37.129.100:9000 from songqi got value #28
DEBUG - Call: delete took 9ms
Exception in thread "main" java.lang.NoSuchFieldError: DEFAULT_MR_AM_ADMIN_USER_ENV
    at org.apache.hadoop.mapred.YARNRunner.createApplicationSubmissionContext(YARNRunner.java:480)
    at org.apache.hadoop.mapred.YARNRunner.submitJob(YARNRunner.java:296)
    at org.apache.hadoop.mapreduce.JobSubmitter.submitJobInternal(JobSubmitter.java:432)
    at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1285)
    at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1282)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:422)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1614)
    at org.apache.hadoop.mapreduce.Job.submit(Job.java:1282)
    at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1303)
    at com.zzti.sq.day1101shenmegui.wtf.ReadHBaseDataMR.main(ReadHBaseDataMR.java:45)
DEBUG - stopping client from cache: org.apache.hadoop.ipc.Client@341b80b2
Please help; this is very strange. I'm sure the cluster itself is fine.
This problem is generally caused by mixed versions or an upgrade. In other words, it isn't the program; it's the versions.
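A quick way to check for this kind of version mix is to ask the JVM which jar each Hadoop class was actually loaded from. Below is a minimal sketch (the class name FindConflictingJars is made up for illustration): a NoSuchFieldError at submit time typically means YARNRunner was compiled against a newer MRJobConfig than the one the classpath supplies.

import org.apache.hadoop.mapred.YARNRunner;
import org.apache.hadoop.mapreduce.MRJobConfig;

public class FindConflictingJars {
    public static void main(String[] args) {
        // Print the jar each class was loaded from. If the two URLs point at
        // different Hadoop versions, the classpath mixes client jars, which
        // is exactly the situation that produces NoSuchFieldError.
        System.out.println("MRJobConfig from: "
                + MRJobConfig.class.getProtectionDomain().getCodeSource().getLocation());
        System.out.println("YARNRunner from:  "
                + YARNRunner.class.getProtectionDomain().getCodeSource().getLocation());
    }
}

Run this with the same classpath as the failing job; if both URLs show the same Hadoop version, look next at what the cluster ships.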
yaojiank posted on 2018-11-5 12:50:
This problem is generally caused by mixed versions or an upgrade. In other words, it isn't the program; it's the versions.
Hello, my classmate uses exactly the same versions as I do: HBase 1.2.6 and Hadoop 2.7.6. My environment is a virtual machine on a Mac; his is Windows. Both are Maven projects. Could it be something else, such as the CentOS version? Mine is 7 and his is 6, but surely that shouldn't cause a problem like this? 😓
o13674976542 posted on 2018-11-5 16:23:
Hello, my classmate uses exactly the same versions as I do: HBase 1.2.6 and Hadoop 2.7.6. My environment is a virtual machine on a Mac; his is W ...
Why are there two people?
The operating system is not the issue. This kind of error is usually related to the big-data components themselves: incompatible or conflicting versions.
Is your local development environment consistent with the cluster environment?
Are you running the program locally, or submitting it to run remotely?
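One way to answer the consistency question concretely is to print the Hadoop version the client code actually links against and compare it with what `hadoop version` reports on the cluster. Here is a minimal sketch using the standard org.apache.hadoop.util.VersionInfo API; the class name PrintHadoopVersion is only for illustration.

import org.apache.hadoop.util.VersionInfo;

public class PrintHadoopVersion {
    public static void main(String[] args) {
        // VersionInfo reports the version of the hadoop-common jar that is
        // actually on the client classpath, which is what matters here.
        System.out.println("Hadoop version: " + VersionInfo.getVersion());
        System.out.println("Compiled by " + VersionInfo.getUser()
                + " from revision " + VersionInfo.getRevision());
    }
}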
阿飞 posted on 2018-11-5 20:06:
Why are there two people?
The operating system is not the issue. This kind of error is usually related to the big-data components themselves: incompatible or conflicting versions.
Hello, by "two people" I meant that my classmate's environment is the same as mine, yet his works and mine fails. I submit from my local machine and the job runs on the remote cluster; I set that up in the code as well. My local environment was copied down from the virtual machine, so it is consistent. Should I now consider switching the HBase or Hadoop version to solve this?
One more thing I'd like to ask: you must have been working for quite a while. When you hit a problem like this at work with no leads at all, how do you approach it? I really don't know where to start.
o13674976542 posted on 2018-11-6 08:56:
Hello, by "two people" I meant that my classmate's environment is the same as mine, yet his works and mine fails. I submit from my local machine and the job runs remotely ...
No need to consider anything else. Treat it as practice: find out exactly where your two setups differ.
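A concrete way to find that difference is for both machines to dump their effective runtime classpath, sorted, and diff the two outputs; a mismatched jar version stands out immediately. A minimal sketch follows (the class name DumpClasspath is made up):

import java.io.File;
import java.util.Arrays;

public class DumpClasspath {
    public static void main(String[] args) {
        // Sort the entries so that output from the two machines can be
        // compared with a plain text diff.
        String[] entries = System.getProperty("java.class.path")
                .split(File.pathSeparator);
        Arrays.sort(entries);
        for (String entry : entries) {
            System.out.println(entry);
        }
    }
}

Redirect the output to a file on each machine and run a diff; pay particular attention to hadoop-* and hbase-* jars with differing version numbers.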
阿飞 posted on 2018-11-6 09:26:
No need to consider anything else. Treat it as practice: find out exactly where your two setups differ.
Got it, thank you!