
Storing Hive Metadata in Oracle: Summaries from the Experts

pig2 posted 2013-12-23 12:58:04
Last edited by pig2 on 2013-12-23 13:07

Expert 1:
1) Create the Oracle user:
sql> create user hive identified by hive

     default tablespace hive;

sql> grant dba to hive;              [note: DBA is convenient but far more privilege than Hive needs; see the least-privilege sketch below]
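
If you prefer not to grant DBA, the following least-privilege sketch is usually enough for the schema DataNucleus creates (an assumption to verify against your environment; adjust privileges and quotas as needed):

sql> grant connect, resource to hive;
sql> alter user hive quota unlimited on hive;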


2) Edit hive/conf/hive-default.xml:
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<!-- Hive Configuration can either be stored in this file or in the hadoop configuration files -->
<!-- that are implied by Hadoop setup variables. -->
<!-- Aside from Hadoop setup variables - this file is provided as a convenience so that Hive -->
<!-- users do not have to edit hadoop configuration files (that may be managed as a centralized -->
<!-- resource). -->
<!-- Hive Execution Parameters -->
<property>
<name>mapred.reduce.tasks</name>
<value>-1</value>
<description>The default number of reduce tasks per job. Typically set
to a prime close to the number of available hosts. Ignored when
mapred.job.tracker is "local". Hadoop sets this to 1 by default, whereas Hive uses -1 as its default value.
By setting this property to -1, Hive will automatically figure out the number of reducers.
</description>
</property>
<property>
<name>hive.exec.reducers.bytes.per.reducer</name>
<value>1000000000</value>
<description>Size per reducer. The default is 1G, i.e. if the input size is 10G, it will use 10 reducers.</description>
</property>
<property>
<name>hive.exec.reducers.max</name>
<value>999</value>
<description>Max number of reducers that will be used. If the one
specified in the configuration parameter mapred.reduce.tasks is
negative, Hive will use this as the max number of reducers when
automatically determining the number of reducers.</description>
</property>
<property>
<name>hive.exec.scratchdir</name>
<value>/tmp/hive-${user.name}</value>
<description>Scratch space for Hive jobs</description>
</property>
<property>
<name>hive.test.mode</name>
<value>false</value>
<description>Whether Hive is running in test mode. If yes, it turns on sampling and prefixes the output table name.</description>
</property>
<property>
<name>hive.test.mode.prefix</name>
<value>test_</value>
<description>If Hive is running in test mode, prefixes the output table by this string.</description>
</property>
<!-- If the input table is not bucketed, the denominator of the tablesample is determined by the parameter below -->
<!-- For example, the following query: -->
<!-- INSERT OVERWRITE TABLE dest -->
<!-- SELECT col1 from src -->
<!-- would be converted to -->
<!-- INSERT OVERWRITE TABLE test_dest -->
<!-- SELECT col1 from src TABLESAMPLE (BUCKET 1 out of 32 on rand(1)) -->
<property>
<name>hive.test.mode.samplefreq</name>
<value>32</value>
<description>If Hive is running in test mode and the table is not bucketed, the sampling frequency.</description>
</property>
<property>
<name>hive.test.mode.nosamplelist</name>
<value></value>
<description>If Hive is running in test mode, don't sample the above comma-separated list of tables.</description>
</property>
<property>
<name>hive.metastore.local</name>
<value>true</value>
<description>Controls whether to connect to a remote metastore server or open a new metastore server in the Hive client JVM.</description>
</property>
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:oracle:thin:@192.168.1.101:1521/ORCL</value>
<description>JDBC connect string for a JDBC metastore</description>
</property>
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>oracle.jdbc.driver.OracleDriver</value> <!-- this class lives in the ojdbc JAR downloaded in step 3 -->
<description>Driver class name for a JDBC metastore</description>
</property>
<property>
<name>javax.jdo.PersistenceManagerFactoryClass</name>
<value>org.datanucleus.jdo.JDOPersistenceManagerFactory</value>
<description>class implementing the jdo persistence</description>
</property>
<property>
<name>javax.jdo.option.DetachAllOnCommit</name>
<value>true</value>
<description>detaches all objects from session so that they can be used after transaction is committed</description>
</property>
<property>
<name>javax.jdo.option.NonTransactionalRead</name>
<value>true</value>
<description>reads outside of transactions</description>
</property>
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>hive</value> <!-- the Oracle user created in step 1 -->
<description>username to use against metastore database</description>
</property>
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>hive</value> <!-- that user's password -->
<description>password to use against metastore database</description>
</property>
<property>
<name>datanucleus.validateTables</name>
<value>false</value>
<description>validates existing schema against code. turn this on if you want to verify existing schema</description>
</property>
<property>
<name>datanucleus.validateColumns</name>
<value>false</value>
<description>validates existing schema against code. turn this on if you want to verify existing schema</description>
</property>
<property>
<name>datanucleus.validateConstraints</name>
<value>false</value>
<description>validates existing schema against code. turn this on if you want to verify existing schema</description>
</property>
<property>
<name>datanucleus.storeManagerType</name>
<value>rdbms</value>
<description>metadata store type</description>
</property>
<property>
<name>datanucleus.autoCreateSchema</name>
<value>true</value>
<description>creates necessary schema on startup if one doesn't exist. set this to false after creating it once</description>
</property>
<property>
<name>datanucleus.autoStartMechanismMode</name>
<value>checked</value>
<description>throw exception if metadata tables are incorrect</description>
</property>
<property>
<name>datanucleus.transactionIsolation</name>
<value>read-committed</value>
<description></description>
</property>
<property>
<name>datanucleus.cache.level2</name>
<value>true</value>
<description>use a level 2 cache. turn this off if metadata is changed independently of hive metastore server</description>
</property>
<property>
<name>datanucleus.cache.level2.type</name>
<value>SOFT</value>
<description>SOFT=soft reference based cache, WEAK=weak reference based cache.</description>
</property>
<property>
<name>hive.metastore.warehouse.dir</name>
<value>/user/hive/warehouse</value>
<description>location of default database for the warehouse</description>
</property>
<property>
<name>hive.metastore.connect.retries</name>
<value>5</value>
<description>Number of retries while opening a connection to metastore</description>
</property>
<property>
<name>hive.metastore.rawstore.impl</name>
<value>org.apache.hadoop.hive.metastore.ObjectStore</value>
<description>Name of the class that implements the org.apache.hadoop.hive.metastore.rawstore interface. This class is used to store and retrieve raw metadata objects
such as tables and databases</description>
</property>
<property>
<name>hive.default.fileformat</name>
<value>TextFile</value>
<description>Default file format for CREATE TABLE statement. Options are TextFile and SequenceFile. Users can explicitly say CREATE TABLE ... STORED AS
<TEXTFILE|SEQUENCEFILE> to override</description>
</property>
<property>
<name>hive.map.aggr</name>
<value>true</value>
<description>Whether to use map-side aggregation in Hive Group By queries</description>
</property>
<property>
<name>hive.groupby.skewindata</name>
<value>false</value>
<description>Whether there is skew in data to optimize group by queries</description>
</property>
<property>
<name>hive.groupby.mapaggr.checkinterval</name>
<value>100000</value>
<description>Number of rows after which size of the grouping keys/aggregation classes is performed</description>
</property>
<property>
<name>hive.mapred.local.mem</name>
<value>0</value>
<description>For local mode, memory of the mappers/reducers</description>
</property>
<property>
<name>hive.map.aggr.hash.percentmemory</name>
<value>0.5</value>
<description>Portion of total memory to be used by map-side group aggregation hash table</description>
</property>
<property>
<name>hive.map.aggr.hash.min.reduction</name>
<value>0.5</value>
<description>Hash aggregation will be turned off if the ratio between hash
table size and input rows is bigger than this number. Set to 1 to make sure
hash aggregation is never turned off.</description>
</property>
<property>
<name>hive.optimize.cp</name>
<value>true</value>
<description>Whether to enable column pruner</description>
</property>
<property>
<name>hive.optimize.ppd</name>
<value>true</value>
<description>Whether to enable predicate pushdown</description>
</property>
<property>
<name>hive.optimize.pruner</name>
<value>true</value>
<description>Whether to enable the new partition pruner which depends on predicate pushdown. If this is disabled,
the old partition pruner which is based on AST will be enabled.</description>
</property>
<property>
<name>hive.join.emit.interval</name>
<value>1000</value>
<description>How many rows in the right-most join operand Hive should buffer before emitting the join result.</description>
</property>
<property>
<name>hive.mapred.mode</name>
<value>nonstrict</value>
<description>The mode in which the hive operations are being performed. In strict mode, some risky queries are not allowed to run</description>
</property>
<property>
<name>hive.exec.script.maxerrsize</name>
<value>100000</value>
<description>Maximum number of bytes a script is allowed to emit to standard error (per map-reduce task). This prevents runaway scripts from filling log partitions to
capacity</description>
</property>
<property>
<name>hive.exec.compress.output</name>
<value>false</value>
<description>This controls whether the final outputs of a query (to a local/hdfs file or a hive table) is compressed. The compression codec and other options are
determined from hadoop config variables mapred.output.compress*</description>
</property>
<property>
<name>hive.exec.compress.intermediate</name>
<value>false</value>
<description>This controls whether intermediate files produced by hive between multiple map-reduce jobs are compressed. The compression codec and other options are
determined from hadoop config variables mapred.output.compress*</description>
</property>
<property>
<name>hive.hwi.listen.host</name>
<value>0.0.0.0</value>
<description>This is the host address the Hive Web Interface will listen on</description>
</property>
<property>
<name>hive.hwi.listen.port</name>
<value>9999</value>
<description>This is the port the Hive Web Interface will listen on</description>
</property>
<property>
<name>hive.hwi.war.file</name>
<value>${HIVE_HOME}/lib/hive-hwi.war</value>
<description>This is the WAR file with the jsp content for Hive Web Interface</description>
</property>
<property>
<name>hive.exec.pre.hooks</name>
<value></value>
<description>Pre Execute Hook for Tests</description>
</property>
<property>
<name>hive.merge.mapfiles</name>
<value>true</value>
<description>Merge small files at the end of a map-only job</description>
</property>
<property>
<name>hive.merge.mapredfiles</name>
<value>false</value>
<description>Merge small files at the end of any job (map only or map-reduce)</description>
</property>
<property>
<name>hive.heartbeat.interval</name>
<value>1000</value>
<description>Send a heartbeat after this interval - used by mapjoin and filter operators</description>
</property>
<property>
<name>hive.merge.size.per.task</name>
<value>256000000</value>
<description>Size of merged files at the end of the job</description>
</property>
<property>
<name>hive.script.auto.progress</name>
<value>false</value>
<description>Whether Hive Transform/Map/Reduce clauses should automatically send progress information to the TaskTracker to avoid the task getting killed because of
inactivity. Hive sends progress information when the script is outputting to stderr. This option removes the need to periodically produce stderr messages, but users
should be cautious because this may prevent a script stuck in an infinite loop from being killed by the TaskTracker.</description>
</property>
</configuration>
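
Most of the file above is stock Hive defaults. Only a handful of properties actually matter for pointing the metastore at Oracle; a minimal subset, reusing the values shown above, would be:

<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:oracle:thin:@192.168.1.101:1521/ORCL</value>
</property>
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>oracle.jdbc.driver.OracleDriver</value>
</property>
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>hive</value>
</property>
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>hive</value>
</property>
<property>
<name>datanucleus.autoCreateSchema</name>
<value>true</value>
</property>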
3) Download the Oracle JDBC driver (ojdbc14.jar):

http://www.oracle.com/technology/software/tech/java/sqlj_jdbc/htdocs/jdbc_10201.html
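
After downloading, the JAR must be on Hive's classpath so the driver class can be loaded. A minimal sketch, assuming HIVE_HOME points at your Hive installation:

$ cp ojdbc14.jar $HIVE_HOME/lib/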

  

4) Test it:

[root@boss ~]# hive
Hive history file=/tmp/root/hive_job_log_root_201001162330_322775424.txt
hive> show tables;
OK
Time taken: 6.008 seconds
hive> create table test(id bigint);
OK
Time taken: 0.495 seconds
hive>




5) Log in to Oracle and check whether the metastore tables were auto-created:

[oracle@boss ~]$ sqlplus hive/hive@192.168.1.101:1521/spgjmega

SQL*Plus: Release 10.2.0.1.0 - Production on Sat Jan 16 23:57:37 2010

Copyright (c) 1982, 2005, Oracle. All rights reserved.


Connected to:
Oracle Database 10g Enterprise Edition Release 10.2.0.1.0 - Production
With the Partitioning, OLAP and Data Mining options

SQL> show user;
USER is "HIVE"
SQL> select * from tab;

TNAME TABTYPE CLUSTERID
------------------------------ ------- ----------
NUCLEUS_TABLES TABLE
A TABLE
DBS TABLE
SEQUENCE_TABLE TABLE
SERDES TABLE
TBLS TABLE
SDS TABLE
PARTITION_KEYS TABLE
COLUMNS TABLE
BUCKETING_COLS TABLE
SD_PARAMS TABLE

TNAME TABTYPE CLUSTERID
------------------------------ ------- ----------
SORT_COLS TABLE
SERDE_PARAMS TABLE
TABLE_PARAMS TABLE

14 rows selected.

SQL> set line 1000
SQL> r
1* select * from TBLS

TBL_ID CREATE_TIME DB_ID LAST_ACCESS_TIME OWNER RETENTION SD_ID TBL_NAME
------ ----------- ----- ---------------- ----- --------- ----- --------
     1  1263655926     1                0 root          0     1 test

SQL>
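
To see which database each table belongs to, a join of TBLS against DBS works against the metastore schema listed above (a sketch):

SQL> select d.NAME, t.TBL_NAME from DBS d, TBLS t where t.DB_ID = d.DB_ID;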
Expert 2:
[Prerequisite]
hadoop, hive, sqoop installed
ojdbc6.jar was in directory /usr/lib/sqoop/lib (ojdbc6.jar is for JDK 1.6)

[Steps]
1. Install 32-bit Oracle 11g on a 32-bit Windows VM (Windows Server 2003).
2. Configure a local listener in Oracle Net Manager (add a listener).
3. Load an Oracle table into a Hive table via Sqoop.
In Oracle SQL*Plus, list all tables owned by the current user:
SQL> select unique tname from col;
We choose the table "DEPT" to migrate.
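
Optionally, inspect the source table's structure first:

SQL> describe DEPT;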

On a cluster node, check the existing Hive tables:

$ hive
hive> show tables;
hive> quit;

$ sqoop import --connect jdbc:oracle:thin:@IPAddressOfNodeInstallOracle:1521:orcl --username SCOTT --password 123456 --table DEPT -m 1 --hive-import --hive-table "HIVETEST"
NOTE: IPAddressOfNodeInstallOracle is the address of the Windows host running Oracle (e.g. 172.16.3.4); the username and password are the ones configured in Oracle; the table name and username should be upper case.
Then verify the import in Hive:
$ hive
hive> show tables;
hive> select * from hivetest;
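
Before running the full import, it can help to confirm that Sqoop can reach Oracle at all; a sketch using sqoop eval with the same connection parameters:

$ sqoop eval --connect jdbc:oracle:thin:@IPAddressOfNodeInstallOracle:1521:orcl --username SCOTT --password 123456 --query "SELECT COUNT(*) FROM DEPT"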



Comments (4)

祭淰♂ replied 2013-12-23 14:28:22
Could you share the configuration steps? Why can't my setup connect to Oracle?

pig2 replied 2013-12-23 15:01:08
Quoting 祭淰♂ (2013-12-23 14:28): "Could you share the configuration steps? Why can't my setup connect to Oracle?"

You can check your setup against this command:
$ sqoop import --connect jdbc:oracle:thin:@IPAddressOfNodeInstallOracle:1521:orcl --username SCOTT --password 123456 --table DEPT -m 1 --hive-import --hive-table "HIVETEST"
Common reasons for failing to connect:
(1) an incorrect installation or configuration
(2) a wrong connection URL
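
It is also worth ruling out basic network and listener problems before debugging Hive or Sqoop; a quick sketch (substitute your Oracle host, port, and service name):

$ telnet 192.168.1.101 1521     # does the listener port answer?
$ tnsping ORCL                  # if an Oracle client is installed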


aqi915 posted 2015-10-9 10:37:14
Hi,
My environment: Hive 1.2.1 on Linux (32-bit system and environment); Oracle 11g on 64-bit Windows with a 32-bit environment (Oracle, Java, etc.). Integration with MySQL previously worked fine.


When integrating the Hive metastore with Oracle, creating a table fails with the error below. I tried to fix it myself by changing LONGVARCHAR to CLOB in package.jdo inside hive-metastore-1.2.1.jar, but the same error still occurs. Judging from hive.log, my change did not take effect: the generated DDL still contains two LONG columns. The fields I changed:
      <field name="viewOriginalText" default-fetch-group="false">
        <column name="VIEW_ORIGINAL_TEXT" jdbc-type="CLOB"/>
      </field>
      <field name="viewExpandedText" default-fetch-group="false">
        <column name="VIEW_EXPANDED_TEXT" jdbc-type="CLOB"/>
      </field>
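
One common reason such an edit does not take effect is that package.jdo was modified in an extracted copy and never written back into the jar, so Hive keeps loading the original descriptor. A sketch of the round trip, assuming the jar sits in $HIVE_HOME/lib:

$ cd $HIVE_HOME/lib
$ jar xf hive-metastore-1.2.1.jar package.jdo   # extract the descriptor from the jar
$ vi package.jdo                                # change the LONGVARCHAR columns to CLOB
$ jar uf hive-metastore-1.2.1.jar package.jdo   # pack the edited copy back into the jar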

The table creation attempt:
hive> CREATE TABLE pokes1(foo INT, bar STRING);
FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:javax.jdo.JDODataStoreException: An exception was thrown while adding/validating class(es) : ORA-01754: a table may contain only one column of type LONG

java.sql.SQLSyntaxErrorException: ORA-01754: a table may contain only one column of type LONG

        at oracle.jdbc.driver.T4CTTIoer.processError(T4CTTIoer.java:447)
        at oracle.jdbc.driver.T4CTTIoer.processError(T4CTTIoer.java:396)
        at oracle.jdbc.driver.T4C8Oall.processError(T4C8Oall.java:951)
        at oracle.jdbc.driver.T4CTTIfun.receive(T4CTTIfun.java:513)
        at oracle.jdbc.driver.T4CTTIfun.doRPC(T4CTTIfun.java:227)
        at oracle.jdbc.driver.T4C8Oall.doOALL(T4C8Oall.java:531)
        at oracle.jdbc.driver.T4CStatement.doOall8(T4CStatement.java:195)
        at oracle.jdbc.driver.T4CStatement.executeForRows(T4CStatement.java:1036)
        at oracle.jdbc.driver.OracleStatement.doExecuteWithTimeout(OracleStatement.java:1336)
        at oracle.jdbc.driver.OracleStatement.executeInternal(OracleStatement.java:1916)
        at oracle.jdbc.driver.OracleStatement.execute(OracleStatement.java:1878)
        at oracle.jdbc.driver.OracleStatementWrapper.execute(OracleStatementWrapper.java:318)
        at com.jolbox.bonecp.StatementHandle.execute(StatementHandle.java:254)
        at org.datanucleus.store.rdbms.table.AbstractTable.executeDdlStatement(AbstractTable.java:760)
        at org.datanucleus.store.rdbms.table.AbstractTable.executeDdlStatementList(AbstractTable.java:711)
        at org.datanucleus.store.rdbms.table.AbstractTable.create(AbstractTable.java:425)
        at org.datanucleus.store.rdbms.table.AbstractTable.exists(AbstractTable.java:488)
        at org.datanucleus.store.rdbms.RDBMSStoreManager$ClassAdder.performTablesValidation(RDBMSStoreManager.java:3380)
        at org.datanucleus.store.rdbms.RDBMSStoreManager$ClassAdder.addClassTablesAndValidate(RDBMSStoreManager.java:3190)
        at org.datanucleus.store.rdbms.RDBMSStoreManager$ClassAdder.run(RDBMSStoreManager.java:2841)
        at org.datanucleus.store.rdbms.AbstractSchemaTransaction.execute(AbstractSchemaTransaction.java:122)
        at org.datanucleus.store.rdbms.RDBMSStoreManager.addClasses(RDBMSStoreManager.java:1605)
        at org.datanucleus.store.AbstractStoreManager.addClass(AbstractStoreManager.java:954)
        at org.datanucleus.store.rdbms.RDBMSStoreManager.getDatastoreClass(RDBMSStoreManager.java:679)
        at org.datanucleus.store.rdbms.RDBMSStoreManager.getPropertiesForGenerator(RDBMSStoreManager.java:2045)
        at org.datanucleus.store.AbstractStoreManager.getStrategyValue(AbstractStoreManager.java:1365)
        at org.datanucleus.ExecutionContextImpl.newObjectId(ExecutionContextImpl.java:3827)
        at org.datanucleus.state.JDOStateManager.setIdentity(JDOStateManager.java:2571)
        at org.datanucleus.state.JDOStateManager.initialiseForPersistentNew(JDOStateManager.java:513)
        at org.datanucleus.state.ObjectProviderFactoryImpl.newForPersistentNew(ObjectProviderFactoryImpl.java:232)
        at org.datanucleus.ExecutionContextImpl.newObjectProviderForPersistentNew(ExecutionContextImpl.java:1414)
        at org.datanucleus.ExecutionContextImpl.persistObjectInternal(ExecutionContextImpl.java:2218)
        at org.datanucleus.ExecutionContextImpl.persistObjectWork(ExecutionContextImpl.java:2065)
        at org.datanucleus.ExecutionContextImpl.persistObject(ExecutionContextImpl.java:1913)
        at org.datanucleus.ExecutionContextThreadedImpl.persistObject(ExecutionContextThreadedImpl.java:217)
        at org.datanucleus.api.jdo.JDOPersistenceManager.jdoMakePersistent(JDOPersistenceManager.java:727)
        at org.datanucleus.api.jdo.JDOPersistenceManager.makePersistent(JDOPersistenceManager.java:752)
        at org.apache.hadoop.hive.metastore.ObjectStore.createTable(ObjectStore.java:814)
        at sun.reflect.GeneratedMethodAccessor37.invoke(Unknown Source)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:606)
        at org.apache.hadoop.hive.metastore.RawStoreProxy.invoke(RawStoreProxy.java:114)
        at com.sun.proxy.$Proxy0.createTable(Unknown Source)
        at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.create_table_core(HiveMetaStore.java:1416)
        at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.create_table_with_environment_context(HiveMetaStore.java:1449)
        at sun.reflect.GeneratedMethodAccessor12.invoke(Unknown Source)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:606)
        at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invoke(RetryingHMSHandler.java:107)
        at com.sun.proxy.$Proxy3.create_table_with_environment_context(Unknown Source)
        at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$create_table_with_environment_context.getResult(ThriftHiveMetastore.java:9200)
        at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$create_table_with_environment_context.getResult(ThriftHiveMetastore.java:9184)
        at org.apache.thrift.ProcessFunction.process(ProcessFunction.java:39)
        at org.apache.hadoop.hive.metastore.TUGIBasedProcessor$1.run(TUGIBasedProcessor.java:110)
        at org.apache.hadoop.hive.metastore.TUGIBasedProcessor$1.run(TUGIBasedProcessor.java:106)
        at java.security.AccessController.doPrivileged(Native Method)
        at javax.security.auth.Subject.doAs(Subject.java:415)
        at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1657)
        at org.apache.hadoop.hive.metastore.TUGIBasedProcessor.process(TUGIBasedProcessor.java:118)
        at org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:285)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        at java.lang.Thread.run(Thread.java:745)

        at org.datanucleus.api.jdo.NucleusJDOHelper.getJDOExceptionForNucleusException(NucleusJDOHelper.java:451)
        at org.datanucleus.api.jdo.JDOPersistenceManager.jdoMakePersistent(JDOPersistenceManager.java:732)
        at org.datanucleus.api.jdo.JDOPersistenceManager.makePersistent(JDOPersistenceManager.java:752)
        at org.apache.hadoop.hive.metastore.ObjectStore.createTable(ObjectStore.java:814)
        at sun.reflect.GeneratedMethodAccessor37.invoke(Unknown Source)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:606)
        at org.apache.hadoop.hive.metastore.RawStoreProxy.invoke(RawStoreProxy.java:114)
        at com.sun.proxy.$Proxy0.createTable(Unknown Source)
        at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.create_table_core(HiveMetaStore.java:1416)
        at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.create_table_with_environment_context(HiveMetaStore.java:1449)
        at sun.reflect.GeneratedMethodAccessor12.invoke(Unknown Source)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:606)
        at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invoke(RetryingHMSHandler.java:107)
        at com.sun.proxy.$Proxy3.create_table_with_environment_context(Unknown Source)
        at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$create_table_with_environment_context.getResult(ThriftHiveMetastore.java:9200)
        at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$create_table_with_environment_context.getResult(ThriftHiveMetastore.java:9184)
        at org.apache.thrift.ProcessFunction.process(ProcessFunction.java:39)
        at org.apache.hadoop.hive.metastore.TUGIBasedProcessor$1.run(TUGIBasedProcessor.java:110)
        at org.apache.hadoop.hive.metastore.TUGIBasedProcessor$1.run(TUGIBasedProcessor.java:106)
        at java.security.AccessController.doPrivileged(Native Method)
        at javax.security.auth.Subject.doAs(Subject.java:415)
        at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1657)
        at org.apache.hadoop.hive.metastore.TUGIBasedProcessor.process(TUGIBasedProcessor.java:118)
        at org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:285)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        at java.lang.Thread.run(Thread.java:745)
NestedThrowablesStackTrace:
java.sql.SQLSyntaxErrorException: ORA-01754: a table may contain only one column of type LONG

        at oracle.jdbc.driver.T4CTTIoer.processError(T4CTTIoer.java:447)
        at oracle.jdbc.driver.T4CTTIoer.processError(T4CTTIoer.java:396)
        at oracle.jdbc.driver.T4C8Oall.processError(T4C8Oall.java:951)
        at oracle.jdbc.driver.T4CTTIfun.receive(T4CTTIfun.java:513)
        at oracle.jdbc.driver.T4CTTIfun.doRPC(T4CTTIfun.java:227)
        at oracle.jdbc.driver.T4C8Oall.doOALL(T4C8Oall.java:531)
        at oracle.jdbc.driver.T4CStatement.doOall8(T4CStatement.java:195)
        at oracle.jdbc.driver.T4CStatement.executeForRows(T4CStatement.java:1036)
        at oracle.jdbc.driver.OracleStatement.doExecuteWithTimeout(OracleStatement.java:1336)
        at oracle.jdbc.driver.OracleStatement.executeInternal(OracleStatement.java:1916)
        at oracle.jdbc.driver.OracleStatement.execute(OracleStatement.java:1878)
        at oracle.jdbc.driver.OracleStatementWrapper.execute(OracleStatementWrapper.java:318)
        at com.jolbox.bonecp.StatementHandle.execute(StatementHandle.java:254)
        at org.datanucleus.store.rdbms.table.AbstractTable.executeDdlStatement(AbstractTable.java:760)
        at org.datanucleus.store.rdbms.table.AbstractTable.executeDdlStatementList(AbstractTable.java:711)
        at org.datanucleus.store.rdbms.table.AbstractTable.create(AbstractTable.java:425)
        at org.datanucleus.store.rdbms.table.AbstractTable.exists(AbstractTable.java:488)
        at org.datanucleus.store.rdbms.RDBMSStoreManager$ClassAdder.performTablesValidation(RDBMSStoreManager.java:3380)
        at org.datanucleus.store.rdbms.RDBMSStoreManager$ClassAdder.addClassTablesAndValidate(RDBMSStoreManager.java:3190)
        at org.datanucleus.store.rdbms.RDBMSStoreManager$ClassAdder.run(RDBMSStoreManager.java:2841)
        at org.datanucleus.store.rdbms.AbstractSchemaTransaction.execute(AbstractSchemaTransaction.java:122)
        at org.datanucleus.store.rdbms.RDBMSStoreManager.addClasses(RDBMSStoreManager.java:1605)
        at org.datanucleus.store.AbstractStoreManager.addClass(AbstractStoreManager.java:954)
        at org.datanucleus.store.rdbms.RDBMSStoreManager.getDatastoreClass(RDBMSStoreManager.java:679)
        at org.datanucleus.store.rdbms.RDBMSStoreManager.getPropertiesForGenerator(RDBMSStoreManager.java:2045)
        at org.datanucleus.store.AbstractStoreManager.getStrategyValue(AbstractStoreManager.java:1365)
        at org.datanucleus.ExecutionContextImpl.newObjectId(ExecutionContextImpl.java:3827)
        at org.datanucleus.state.JDOStateManager.setIdentity(JDOStateManager.java:2571)
        at org.datanucleus.state.JDOStateManager.initialiseForPersistentNew(JDOStateManager.java:513)
        at org.datanucleus.state.ObjectProviderFactoryImpl.newForPersistentNew(ObjectProviderFactoryImpl.java:232)
        at org.datanucleus.ExecutionContextImpl.newObjectProviderForPersistentNew(ExecutionContextImpl.java:1414)
        at org.datanucleus.ExecutionContextImpl.persistObjectInternal(ExecutionContextImpl.java:2218)
        at org.datanucleus.ExecutionContextImpl.persistObjectWork(ExecutionContextImpl.java:2065)
        at org.datanucleus.ExecutionContextImpl.persistObject(ExecutionContextImpl.java:1913)
        at org.datanucleus.ExecutionContextThreadedImpl.persistObject(ExecutionContextThreadedImpl.java:217)
        at org.datanucleus.api.jdo.JDOPersistenceManager.jdoMakePersistent(JDOPersistenceManager.java:727)
        at org.datanucleus.api.jdo.JDOPersistenceManager.makePersistent(JDOPersistenceManager.java:752)
        at org.apache.hadoop.hive.metastore.ObjectStore.createTable(ObjectStore.java:814)
        at sun.reflect.GeneratedMethodAccessor37.invoke(Unknown Source)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:606)
        at org.apache.hadoop.hive.metastore.RawStoreProxy.invoke(RawStoreProxy.java:114)
        at com.sun.proxy.$Proxy0.createTable(Unknown Source)
        at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.create_table_core(HiveMetaStore.java:1416)
        at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.create_table_with_environment_context(HiveMetaStore.java:1449)
        at sun.reflect.GeneratedMethodAccessor12.invoke(Unknown Source)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:606)
        at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invoke(RetryingHMSHandler.java:107)
        at com.sun.proxy.$Proxy3.create_table_with_environment_context(Unknown Source)
        at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$create_table_with_environment_context.getResult(ThriftHiveMetastore.java:9200)
        at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$create_table_with_environment_context.getResult(ThriftHiveMetastore.java:9184)
        at org.apache.thrift.ProcessFunction.process(ProcessFunction.java:39)
        at org.apache.hadoop.hive.metastore.TUGIBasedProcessor$1.run(TUGIBasedProcessor.java:110)
        at org.apache.hadoop.hive.metastore.TUGIBasedProcessor$1.run(TUGIBasedProcessor.java:106)
        at java.security.AccessController.doPrivileged(Native Method)
        at javax.security.auth.Subject.doAs(Subject.java:415)
        at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1657)
        at org.apache.hadoop.hive.metastore.TUGIBasedProcessor.process(TUGIBasedProcessor.java:118)
        at org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:285)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        at java.lang.Thread.run(Thread.java:745)
)


I also found that package.jdo in hive-metastore-1.2.1.jar contains one more field of this type (and only this one):
     <field name="message">
        <column name="MESSAGE" jdbc-type="LONGVARCHAR"/>
      </field>
It also contains fields of the following kinds; I am not sure whether they matter:
<field name="longLowValue">
        <column name="LONG_LOW_VALUE"  jdbc-type="BIGINT" allows-null="true"/>
      </field>
      <field name="longHighValue">
        <column name="LONG_HIGH_VALUE" jdbc-type="BIGINT" allows-null="true"/>
      </field>
       <field name="doubleLowValue">
        <column name="DOUBLE_LOW_VALUE"  jdbc-type="DOUBLE" allows-null="true"/>
      </field>
      <field name="doubleHighValue">
        <column name="DOUBLE_HIGH_VALUE" jdbc-type="DOUBLE" allows-null="true"/>
      </field>

My hive-site.xml is as follows:
<configuration>
<property>
<name>hive.metastore.warehouse.dir</name>
<value>hdfs://ktbigdata1:9000/hive</value>
<description>location of default database for thewarehouse</description>
</property>
<property>
<name>hive.exec.scratchdir</name>
<value>hdfs://ktbigdata1:9000/hive/scratchdir</value>
<description>Scratch space for Hive jobs</description>
</property>
<property>
<name>hive.querylog.location</name>
<value>/home/hadoop/apache-hive-1.2.1-bin/logs</value>
<description>
Location of Hive run time structured log file
</description>
</property>
<property>
<name>javax.jdo.option.ConnectionURL</name> <!-- oracle setting 1 -->
<value>jdbc:oracle:thin:@192.168.3.107:1521/ORCL</value>
<description>JDBC connect string for a JDBCmetastore</description>
</property>
<property>
<name>javax.jdo.option.ConnectionDriverName</name> <!-- oracle setting 2 -->
<value>oracle.jdbc.OracleDriver</value>
<description>Driver class name for aJDBC metastore</description>
</property>
<property>
<name>javax.jdo.option.ConnectionUserName</name> <!-- oracle setting 3: the Oracle username -->
<value>username</value>
<description>username to use against metastoredatabase</description>
</property>
<property>
<name>javax.jdo.option.ConnectionPassword</name> <!-- oracle setting 4: the Oracle password -->
<value>password</value>
<description>password to use against metastoredatabase</description>
</property>
<property>
<name>hive.zookeeper.quorum</name>
<value>192.168.100.141,192.168.100.142,192.168.100.143</value>
<description>
List of ZooKeeper servers to talk to. This is needed for:
1. Read/write locks - when hive.lock.manager is set to
org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager,
2. When HiveServer2 supports service discovery via Zookeeper.
3. For delegation token storage if zookeeper store is used, if
hive.cluster.delegation.token.store.zookeeper.connectString is not set
</description>
</property>
<!-- hive.aux.jars.path is optional -->
<property>
<name>hive.aux.jars.path</name>
<value>file:///home/hadoop/apache-hive-1.2.1-bin/lib/hive-hbase-handler*.jar,
file:///home/hadoop/apache-hive-1.2.1-bin/lib/protobuf-java-*.jar,
file:///home/hadoop/apache-hive-1.2.1-bin/lib/hbase-client*.jar,
file:///home/hadoop/apache-hive-1.2.1-bin/lib/hbase-common-*hadoop2.jar,
file:///home/hadoop/apache-hive-1.2.1-bin/lib/zookeeper-*.jar,
file:///home/hadoop/apache-hive-1.2.1-bin/lib/guava-*.jar,
file:///home/hadoop/apache-hive-1.2.1-bin/lib/mongo-hadoop-hive-1.4-rc0.jar</value>
</property>
<!-- clients must add the following -->
<property>
<name>hive.metastore.uris</name>
<value>thrift://ktbigdata1:9083</value>
</property>
</configuration>
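
With hive.metastore.uris set, Hive clients expect a standalone metastore service listening on ktbigdata1:9083. A minimal sketch for starting it on that host:

$ hive --service metastore &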



Could you help me troubleshoot this? Many thanks!


bianxq86 replied 2016-8-10 18:41:04
Quoting aqi915 (2015-10-9): "My environment: Hive 1.2.1 on Linux, 32-bit system and environment; Oracle 11g on a 64-bit Windows system, 32-bit environ ..."

I ran into the same problem today. Did you ever solve it?

