mysqldump -h172.20.0.67 -ucdhhive -p111111 cdhhive > /tmp/database-backup.sql
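If the schema upgrade goes wrong, this dump is the rollback path. A minimal restore sketch, assuming the same host, credentials, and database name as the dump above:

# Drop and recreate the metastore database, then load the backup
mysql -h172.20.0.67 -ucdhhive -p111111 -e "DROP DATABASE IF EXISTS cdhhive; CREATE DATABASE cdhhive"
mysql -h172.20.0.67 -ucdhhive -p111111 cdhhive < /tmp/database-backup.sql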
Go to the Hive service -> Update Hive Metastore Database Schema.
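The same schema upgrade can also be driven from the command line with Hive's schematool, which is handy for checking what version the metastore is at before and after. A sketch, assuming a MySQL-backed metastore whose connection properties are already in hive-site.xml:

# Show the current metastore schema version
schematool -dbType mysql -info
# Upgrade the schema in place to the version the new Hive expects
schematool -dbType mysql -upgradeSchema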
Update the Oozie ShareLib: go to the Oozie service -> Install Oozie ShareLib, then create the Oozie user sharelib and the Oozie user dir.
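After the install, the Oozie CLI can confirm which sharelibs the server actually sees; a sketch, with a hypothetical Oozie server URL:

# List sharelibs known to the Oozie server (the URL is an assumption)
oozie admin -oozie http://oozie-server.example.com:11000/oozie -shareliblist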
Update Sqoop: go to the Sqoop service -> Update Sqoop, then update the Sqoop 2 Server.
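A quick sanity check after the update is to confirm the client reports the new version:

# Should print the upgraded Sqoop version
sqoop version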
Update Spark (details omitted; you can simply remove the old version first and install the new one directly after the upgrade).
Start all services in the cluster. On startup, HBase may fail with Kerberos errors like the following:
org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.security.authorize.AuthorizationException): User hbase/ip-10-1-33-20.ec2.internal@YEAHMOBI.COM (auth:KERBEROS) is not authorized for protocol interface org.apache.hadoop.hdfs.protocol.ClientProtocol, expected client Kerberos principal is null
at org.apache.hadoop.ipc.Client.call(Client.java:1409)
at org.apache.hadoop.ipc.Client.call(Client.java:1362)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:206)
at com.sun.proxy.$Proxy15.setSafeMode(Unknown Source)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:606)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:186)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102)
at com.sun.proxy.$Proxy15.setSafeMode(Unknown Source)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.setSafeMode(ClientNamenodeProtocolTranslatorPB.java:594)
at org.apache.hadoop.hdfs.DFSClient.setSafeMode(DFSClient.java:2224)
at org.apache.hadoop.hdfs.DistributedFileSystem.setSafeMode(DistributedFileSystem.java:993)
at org.apache.hadoop.hdfs.DistributedFileSystem.setSafeMode(DistributedFileSystem.java:977)
at org.apache.hadoop.hbase.util.FSUtils.isInSafeMode(FSUtils.java:432)
at org.apache.hadoop.hbase.util.FSUtils.waitOnSafeMode(FSUtils.java:851)
at org.apache.hadoop.hbase.master.MasterFileSystem.checkRootDir(MasterFileSystem.java:435)
at org.apache.hadoop.hbase.master.MasterFileSystem.createInitialFileSystemLayout(MasterFileSystem.java:146)
at org.apache.hadoop.hbase.master.MasterFileSystem.<init>(MasterFileSystem.java:127)
at org.apache.hadoop.hbase.master.HMaster.finishInitialization(HMaster.java:789)
at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:606)
14/11/27 12:38:26 INFO zookeeper.ClientCnxn: Socket connection established to ip-10-1-33-24.ec2.internal/10.1.33.24:2181, initiating session
14/11/27 12:38:26 INFO zookeeper.ClientCnxn: Session establishment complete on server ip-10-1-33-24.ec2.internal/10.1.33.24:2181, sessionid = 0x549ef6088f20309, negotiated timeout = 60000
14/11/27 12:38:41 WARN ipc.RpcClient: Couldn't setup connection for hbase/ip-10-1-10-15.ec2.internal@YEAHMOBI.COM to hbase/ip-10-1-34-31.ec2.internal@YEAHMOBI.COM
14/11/27 12:38:55 WARN ipc.RpcClient: Couldn't setup connection for hbase/ip-10-1-10-15.ec2.internal@YEAHMOBI.COM to hbase/ip-10-1-34-31.ec2.internal@YEAHMOBI.COM
14/11/27 12:39:15 WARN ipc.RpcClient: Couldn't setup connection for hbase/ip-10-1-10-15.ec2.internal@YEAHMOBI.COM to hbase/ip-10-1-34-31.ec2.internal@YEAHMOBI.COM
14/11/27 12:39:34 WARN ipc.RpcClient: Couldn't setup connection for hbase/ip-10-1-10-15.ec2.internal@YEAHMOBI.COM to hbase/ip-10-1-34-31.ec2.internal@YEAHMOBI.COM
14/11/27 12:39:55 WARN ipc.RpcClient: Couldn't setup connection for hbase/ip-10-1-10-15.ec2.internal@YEAHMOBI.COM to hbase/ip-10-1-34-31.ec2.internal@YEAHMOBI.COM
14/11/27 12:40:19 WARN ipc.RpcClient: Couldn't setup connection for hbase/ip-10-1-10-15.ec2.internal@YEAHMOBI.COM to hbase/ip-10-1-34-31.ec2.internal@YEAHMOBI.COM
14/11/27 12:40:36 WARN ipc.RpcClient: Couldn't setup connection for hbase/ip-10-1-10-15.ec2.internal@YEAHMOBI.COM to hbase/ip-10-1-34-31.ec2.internal@YEAHMOBI.COM
Caused by: java.io.IOException: Couldn't setup connection for hbase/ip-10-1-33-20.ec2.internal@YEAHMOBI.COM to hbase/ip-10-1-34-32.ec2.internal@YEAHMOBI.COM
at org.apache.hadoop.hbase.ipc.RpcClient$Connection$1.run(RpcClient.java:821)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:415)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1614)
at org.apache.hadoop.hbase.ipc.RpcClient$Connection.handleSaslConnectionFailure(RpcClient.java:796)
at org.apache.hadoop.hbase.ipc.RpcClient$Connection.setupIOstreams(RpcClient.java:898)
at org.apache.hadoop.hbase.ipc.RpcClient.getConnection(RpcClient.java:1543)
at org.apache.hadoop.hbase.ipc.RpcClient.call(RpcClient.java:1442)
at org.apache.hadoop.hbase.ipc.RpcClient.callBlockingMethod(RpcClient.java:1661)
at org.apache.hadoop.hbase.ipc.RpcClient$BlockingRpcChannelImplementation.callBlockingMethod(RpcClient.java:1719)
at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$BlockingStub.execService(ClientProtos.java:30014)
at org.apache.hadoop.hbase.protobuf.ProtobufUtil.execService(ProtobufUtil.java:1623)
at org.apache.hadoop.hbase.ipc.RegionCoprocessorRpcChannel$1.call(RegionCoprocessorRpcChannel.java:93)
at org.apache.hadoop.hbase.ipc.RegionCoprocessorRpcChannel$1.call(RegionCoprocessorRpcChannel.java:90)
at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithRetries(RpcRetryingCaller.java:114)
... 31 more
Caused by: javax.security.sasl.SaslException: No common protection layer between client and server
at com.sun.security.sasl.gsskerb.GssKrb5Client.doFinalHandshake(GssKrb5Client.java:252)
at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:187)
at org.apache.hadoop.hbase.security.HBaseSaslRpcClient.saslConnect(HBaseSaslRpcClient.java:210)
at org.apache.hadoop.hbase.ipc.RpcClient$Connection.setupSaslConnection(RpcClient.java:770)
at org.apache.hadoop.hbase.ipc.RpcClient$Connection.access$600(RpcClient.java:357)
at org.apache.hadoop.hbase.ipc.RpcClient$Connection$2.run(RpcClient.java:891)
at org.apache.hadoop.hbase.ipc.RpcClient$Connection$2.run(RpcClient.java:888)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:415)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1614)
at org.apache.hadoop.hbase.ipc.RpcClient$Connection.setupIOstreams(RpcClient.java:888)
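Two distinct problems show up in these traces. The AuthorizationException with "expected client Kerberos principal is null" usually means the HBase hosts are still running with stale HDFS client configuration; redeploying client configuration from Cloudera Manager and restarting typically clears it. The SaslException "No common protection layer between client and server" means the client and the servers disagree on the SASL QOP: hbase.rpc.protection must be identical on the HMaster, the RegionServers, and every client. A sketch of the hbase-site.xml setting, assuming the cluster uses plain authentication rather than integrity or privacy:

<!-- hbase-site.xml: must be the same value on every HBase server and client -->
<property>
  <name>hbase.rpc.protection</name>
  <value>authentication</value>
</property>

A separate failure can appear when connecting to Impala over JDBC: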
java.sql.SQLException: Could not open connection to jdbc:hive2://ip-10-1-33-22.ec2.internal:21050/ym_system;principal=impala/ip-10-1-33-22.ec2.internal@YEAHMOBI.COM: GSS initiate failed
at org.apache.hive.jdbc.HiveConnection.openTransport(HiveConnection.java:187)
at org.apache.hive.jdbc.HiveConnection.<init>(HiveConnection.java:164)
at org.apache.hive.jdbc.HiveDriver.connect(HiveDriver.java:105)
at java.sql.DriverManager.getConnection(DriverManager.java:571)
at java.sql.DriverManager.getConnection(DriverManager.java:233)
at com.cloudera.example.ClouderaImpalaJdbcExample.main(ClouderaImpalaJdbcExample.java:37)
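"GSS initiate failed" here almost always means the JVM running the JDBC client has no valid Kerberos ticket for the realm in the connection string. Acquiring one before launching the client is the usual fix; a sketch, where the keytab path and principal are assumptions:

# Obtain a ticket for the connecting user (keytab and principal are hypothetical)
kinit -kt /path/to/user.keytab someuser@YEAHMOBI.COM
# Verify the ticket cache, then re-run the JDBC client
klist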
1. The NameNode may refuse to start with:
File system image contains an old layout version -51.
An upgrade to version -55 is required.
Please restart NameNode with the "-rollingUpgrade started" option if a rolling upgrade is already started; or restart NameNode with the "-upgrade" option to start a new upgrade.
Fix: start the NameNode manually on the master node with the command below, stop it once the upgrade completes, then restart it from the web UI; after that it comes up normally.
su - hdfs -c "hdfs --config /var/run/cloudera-scm-agent/process/XXX-hdfs-NAMENODE namenode -upgrade"
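Once the upgraded NameNode is up and healthy, the HDFS upgrade still has to be finalized, otherwise DataNodes keep the pre-upgrade block copies around and disk usage keeps growing. A sketch (on a Kerberized cluster, kinit as the hdfs principal first):

# Finalize the HDFS upgrade once the cluster has been verified (irreversible)
su - hdfs -c "hdfs dfsadmin -finalizeUpgrade"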
2. Another error that may occur:
Get corrupt file blocks returned error: Cannot run listCorruptFileBlocks because replication queues have not been initialized.
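This error appears while the NameNode is still in safe mode: replication queues are only initialized once it leaves safe mode, so listCorruptFileBlocks (and fsck) cannot run yet. Waiting for block reports to finish is the normal cure; a sketch of checking the state, and of forcing the exit if it is clearly stuck:

# Check whether the NameNode is still in safe mode
hdfs dfsadmin -safemode get
# Only if block reports are complete and it is stuck: force it out
hdfs dfsadmin -safemode leave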