<description>Name of the class that implements the org.apache.hadoop.hive.metastore.RawStore interface. This class is used to store and retrieve raw metadata objects
such as tables and databases</description>
</property>
<property>
<name>hive.default.fileformat</name>
<value>TextFile</value>
<description>Default file format for CREATE TABLE statements. Options are TextFile and SequenceFile. Users can explicitly write CREATE TABLE ... STORED AS
TEXTFILE or STORED AS SEQUENCEFILE to override</description>
</property>
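<!-- Illustrative example, not part of the original file: the default can be overridden
     per table, e.g.
       CREATE TABLE logs_seq (line STRING) STORED AS SEQUENCEFILE;
     creates a SequenceFile-backed table regardless of hive.default.fileformat. -->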
<property>
<name>hive.map.aggr</name>
<value>true</value>
<description>Whether to use map-side aggregation in Hive Group By queries</description>
</property>
<property>
<name>hive.groupby.skewindata</name>
<value>false</value>
<description>Whether there is skew in data to optimize group by queries</description>
</property>
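<!-- Illustrative example, not from the original notes: both group-by settings can also be
     changed per session from the Hive CLI, e.g.
       set hive.map.aggr=true;
       set hive.groupby.skewindata=true;
     which enables map-side aggregation and the two-stage skew-handling group-by plan for
     the current session only. -->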
<property>
<name>hive.groupby.mapaggr.checkinterval</name>
<value>100000</value>
<description>Number of rows after which the size check of the grouping keys/aggregation hash table is performed</description>
</property>
<property>
<name>hive.mapred.local.mem</name>
<value>0</value>
<description>For local mode, memory of the mappers/reducers</description>
</property>
<property>
<name>hive.map.aggr.hash.percentmemory</name>
<value>0.5</value>
<description>Portion of total memory to be used by map-side group aggregation hash table</description>
</property>
<property>
<name>hive.map.aggr.hash.min.reduction</name>
<value>0.5</value>
<description>Hash aggregation will be turned off if the ratio between hash
table size and input rows is bigger than this number. Set to 1 to make sure
hash aggregation is never turned off.</description>
</property>
<property>
<name>hive.optimize.cp</name>
<value>true</value>
<description>Whether to enable column pruner</description>
</property>
<property>
<name>hive.optimize.ppd</name>
<value>true</value>
<description>Whether to enable predicate pushdown</description>
</property>
<property>
<name>hive.optimize.pruner</name>
<value>true</value>
<description>Whether to enable the new partition pruner which depends on predicate pushdown. If this is disabled,
the old partition pruner which is based on AST will be enabled.</description>
</property>
<property>
<name>hive.join.emit.interval</name>
<value>1000</value>
<description>How many rows in the right-most join operand Hive should buffer before emitting the join result. </description>
</property>
<property>
<name>hive.mapred.mode</name>
<value>nonstrict</value>
<description>The mode in which the hive operations are being performed. In strict mode, some risky queries are not allowed to run</description>
</property>
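<!-- Illustrative example, based on standard Hive strict-mode behavior rather than the
     original notes: after
       set hive.mapred.mode=strict;
     Hive rejects queries it considers risky, for instance a query over a partitioned
     table with no partition filter in the WHERE clause, or an ORDER BY without a LIMIT. -->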
<property>
<name>hive.exec.script.maxerrsize</name>
<value>100000</value>
<description>Maximum number of bytes a script is allowed to emit to standard error (per map-reduce task). This prevents runaway scripts from filling log partitions to
capacity </description>
</property>
<property>
<name>hive.exec.compress.output</name>
<value>false</value>
<description> This controls whether the final outputs of a query (to a local/hdfs file or a hive table) are compressed. The compression codec and other options are
determined from hadoop config variables mapred.output.compress* </description>
</property>
<property>
<name>hive.exec.compress.intermediate</name>
<value>false</value>
<description> This controls whether intermediate files produced by hive between multiple map-reduce jobs are compressed. The compression codec and other options are
determined from hadoop config variables mapred.output.compress* </description>
</property>
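<!-- Illustrative sketch using standard Hadoop MR1 property names (an assumption, not part
     of the original notes): when output compression is enabled, the codec comes from the
     Hadoop side, e.g.
       set hive.exec.compress.output=true;
       set mapred.output.compression.codec=org.apache.hadoop.io.compress.GzipCodec;
     produces gzip-compressed final query output. -->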
<property>
<name>hive.hwi.listen.host</name>
<value>0.0.0.0</value>
<description>This is the host address the Hive Web Interface will listen on</description>
</property>
<property>
<name>hive.hwi.listen.port</name>
<value>9999</value>
<description>This is the port the Hive Web Interface will listen on</description>
</property>
<property>
<name>hive.hwi.war.file</name>
<value>${HIVE_HOME}/lib/hive-hwi.war</value>
<description>This is the WAR file with the jsp content for Hive Web Interface</description>
</property>
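<!-- Illustrative note, based on standard HWI usage rather than the original notes: once
     started with
       hive --service hwi
     the web interface should be reachable at http://<hive-host>:9999/hwi with the
     defaults above. -->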
<property>
<name>hive.exec.pre.hooks</name>
<value></value>
<description>Pre Execute Hook for Tests</description>
</property>
<property>
<name>hive.merge.mapfiles</name>
<value>true</value>
<description>Merge small files at the end of a map-only job</description>
</property>
<property>
<name>hive.merge.mapredfiles</name>
<value>false</value>
<description>Merge small files at the end of any job (map-only or map-reduce)</description>
</property>
<property>
<name>hive.heartbeat.interval</name>
<value>1000</value>
<description>Send a heartbeat after this interval - used by mapjoin and filter operators</description>
</property>
<property>
<name>hive.merge.size.per.task</name>
<value>256000000</value>
<description>Size of merged files at the end of the job</description>
</property>
<property>
<name>hive.script.auto.progress</name>
<value>false</value>
<description>Whether the Hive Transform/Map/Reduce clause should automatically send progress information to the TaskTracker to avoid the task getting killed because of
inactivity. Hive sends progress information when the script outputs to stderr. This option removes the need to periodically produce stderr messages, but users
should be cautious because it may prevent a script stuck in an infinite loop from being killed by the TaskTracker. </description>
4) Test it:
[root@boss ~]# hive
Hive history file=/tmp/root/hive_job_log_root_201001162330_322775424.txt
hive> show tables;
OK
Time taken: 6.008 seconds
hive> create table test(id bigint);
OK
Time taken: 0.495 seconds
hive>
5) Log in to Oracle and check whether the metastore tables were created automatically:
SQL>
Expert 2: [Prerequisite]
hadoop, hive, sqoop installed
ojdbc6.jar is in the directory /usr/lib/sqoop/lib (ojdbc6.jar is for JDK 1.6)
[Steps]
1. Install Oracle 11g (32-bit) on a 32-bit Windows VM (Windows Server 2003)
2. Configure the local listener in Oracle Net Manager: add a listener
3. Load an Oracle table into a Hive table via Sqoop
In Oracle SQL*Plus, check all tables owned by the current user:
SQL> select unique tname from col;
We choose the table "DEPT" to migrate.
On a cluster node:
$ hive
hive> show tables;
hive> quit;
$ sqoop import --connect jdbc:oracle:thin:@IPAddressOfNodeInstallOracle:1521:orcl --username SCOTT --password 123456 --table DEPT -m 1 --hive-import --hive-table "HIVETEST"
NOTICE: IPAddressOfNodeInstallOracle is the IP address of the Windows host running Oracle (e.g. 172.16.3.4); the username and password are the ones configured in Oracle; the table name and username should be in upper case
hive> show tables;
hive> select * from hivetest;
Table creation process:
hive> CREATE TABLE pokes1(foo INT, bar STRING);
FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:javax.jdo.JDODataStoreException: An exception was thrown while adding/validating class(es) : ORA-01754: a table may contain only one column of type LONG
java.sql.SQLSyntaxErrorException: ORA-01754: a table may contain only one column of type LONG
at oracle.jdbc.driver.T4CTTIoer.processError(T4CTTIoer.java:447)
at oracle.jdbc.driver.T4CTTIoer.processError(T4CTTIoer.java:396)
at oracle.jdbc.driver.T4C8Oall.processError(T4C8Oall.java:951)
at oracle.jdbc.driver.T4CTTIfun.receive(T4CTTIfun.java:513)
at oracle.jdbc.driver.T4CTTIfun.doRPC(T4CTTIfun.java:227)
at oracle.jdbc.driver.T4C8Oall.doOALL(T4C8Oall.java:531)
at oracle.jdbc.driver.T4CStatement.doOall8(T4CStatement.java:195)
at oracle.jdbc.driver.T4CStatement.executeForRows(T4CStatement.java:1036)
at oracle.jdbc.driver.OracleStatement.doExecuteWithTimeout(OracleStatement.java:1336)
at oracle.jdbc.driver.OracleStatement.executeInternal(OracleStatement.java:1916)
at oracle.jdbc.driver.OracleStatement.execute(OracleStatement.java:1878)
at oracle.jdbc.driver.OracleStatementWrapper.execute(OracleStatementWrapper.java:318)
at com.jolbox.bonecp.StatementHandle.execute(StatementHandle.java:254)
at org.datanucleus.store.rdbms.table.AbstractTable.executeDdlStatement(AbstractTable.java:760)
at org.datanucleus.store.rdbms.table.AbstractTable.executeDdlStatementList(AbstractTable.java:711)
at org.datanucleus.store.rdbms.table.AbstractTable.create(AbstractTable.java:425)
at org.datanucleus.store.rdbms.table.AbstractTable.exists(AbstractTable.java:488)
at org.datanucleus.store.rdbms.RDBMSStoreManager$ClassAdder.performTablesValidation(RDBMSStoreManager.java:3380)
at org.datanucleus.store.rdbms.RDBMSStoreManager$ClassAdder.addClassTablesAndValidate(RDBMSStoreManager.java:3190)
at org.datanucleus.store.rdbms.RDBMSStoreManager$ClassAdder.run(RDBMSStoreManager.java:2841)
at org.datanucleus.store.rdbms.AbstractSchemaTransaction.execute(AbstractSchemaTransaction.java:122)
at org.datanucleus.store.rdbms.RDBMSStoreManager.addClasses(RDBMSStoreManager.java:1605)
at org.datanucleus.store.AbstractStoreManager.addClass(AbstractStoreManager.java:954)
at org.datanucleus.store.rdbms.RDBMSStoreManager.getDatastoreClass(RDBMSStoreManager.java:679)
at org.datanucleus.store.rdbms.RDBMSStoreManager.getPropertiesForGenerator(RDBMSStoreManager.java:2045)
at org.datanucleus.store.AbstractStoreManager.getStrategyValue(AbstractStoreManager.java:1365)
at org.datanucleus.ExecutionContextImpl.newObjectId(ExecutionContextImpl.java:3827)
at org.datanucleus.state.JDOStateManager.setIdentity(JDOStateManager.java:2571)
at org.datanucleus.state.JDOStateManager.initialiseForPersistentNew(JDOStateManager.java:513)
at org.datanucleus.state.ObjectProviderFactoryImpl.newForPersistentNew(ObjectProviderFactoryImpl.java:232)
at org.datanucleus.ExecutionContextImpl.newObjectProviderForPersistentNew(ExecutionContextImpl.java:1414)
at org.datanucleus.ExecutionContextImpl.persistObjectInternal(ExecutionContextImpl.java:2218)
at org.datanucleus.ExecutionContextImpl.persistObjectWork(ExecutionContextImpl.java:2065)
at org.datanucleus.ExecutionContextImpl.persistObject(ExecutionContextImpl.java:1913)
at org.datanucleus.ExecutionContextThreadedImpl.persistObject(ExecutionContextThreadedImpl.java:217)
at org.datanucleus.api.jdo.JDOPersistenceManager.jdoMakePersistent(JDOPersistenceManager.java:727)
at org.datanucleus.api.jdo.JDOPersistenceManager.makePersistent(JDOPersistenceManager.java:752)
at org.apache.hadoop.hive.metastore.ObjectStore.createTable(ObjectStore.java:814)
at sun.reflect.GeneratedMethodAccessor37.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:606)
at org.apache.hadoop.hive.metastore.RawStoreProxy.invoke(RawStoreProxy.java:114)
at com.sun.proxy.$Proxy0.createTable(Unknown Source)
at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.create_table_core(HiveMetaStore.java:1416)
at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.create_table_with_environment_context(HiveMetaStore.java:1449)
at sun.reflect.GeneratedMethodAccessor12.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:606)
at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invoke(RetryingHMSHandler.java:107)
at com.sun.proxy.$Proxy3.create_table_with_environment_context(Unknown Source)
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$create_table_with_environment_context.getResult(ThriftHiveMetastore.java:9200)
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$create_table_with_environment_context.getResult(ThriftHiveMetastore.java:9184)
at org.apache.thrift.ProcessFunction.process(ProcessFunction.java:39)
at org.apache.hadoop.hive.metastore.TUGIBasedProcessor$1.run(TUGIBasedProcessor.java:110)
at org.apache.hadoop.hive.metastore.TUGIBasedProcessor$1.run(TUGIBasedProcessor.java:106)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:415)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1657)
at org.apache.hadoop.hive.metastore.TUGIBasedProcessor.process(TUGIBasedProcessor.java:118)
at org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:285)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
at org.datanucleus.api.jdo.NucleusJDOHelper.getJDOExceptionForNucleusException(NucleusJDOHelper.java:451)
at org.datanucleus.api.jdo.JDOPersistenceManager.jdoMakePersistent(JDOPersistenceManager.java:732)
at org.datanucleus.api.jdo.JDOPersistenceManager.makePersistent(JDOPersistenceManager.java:752)
at org.apache.hadoop.hive.metastore.ObjectStore.createTable(ObjectStore.java:814)
at sun.reflect.GeneratedMethodAccessor37.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:606)
at org.apache.hadoop.hive.metastore.RawStoreProxy.invoke(RawStoreProxy.java:114)
at com.sun.proxy.$Proxy0.createTable(Unknown Source)
at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.create_table_core(HiveMetaStore.java:1416)
at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.create_table_with_environment_context(HiveMetaStore.java:1449)
at sun.reflect.GeneratedMethodAccessor12.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:606)
at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invoke(RetryingHMSHandler.java:107)
at com.sun.proxy.$Proxy3.create_table_with_environment_context(Unknown Source)
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$create_table_with_environment_context.getResult(ThriftHiveMetastore.java:9200)
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$create_table_with_environment_context.getResult(ThriftHiveMetastore.java:9184)
at org.apache.thrift.ProcessFunction.process(ProcessFunction.java:39)
at org.apache.hadoop.hive.metastore.TUGIBasedProcessor$1.run(TUGIBasedProcessor.java:110)
at org.apache.hadoop.hive.metastore.TUGIBasedProcessor$1.run(TUGIBasedProcessor.java:106)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:415)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1657)
at org.apache.hadoop.hive.metastore.TUGIBasedProcessor.process(TUGIBasedProcessor.java:118)
at org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:285)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
NestedThrowablesStackTrace:
java.sql.SQLSyntaxErrorException: ORA-01754: a table may contain only one column of type LONG
at oracle.jdbc.driver.T4CTTIoer.processError(T4CTTIoer.java:447)
at oracle.jdbc.driver.T4CTTIoer.processError(T4CTTIoer.java:396)
at oracle.jdbc.driver.T4C8Oall.processError(T4C8Oall.java:951)
at oracle.jdbc.driver.T4CTTIfun.receive(T4CTTIfun.java:513)
at oracle.jdbc.driver.T4CTTIfun.doRPC(T4CTTIfun.java:227)
at oracle.jdbc.driver.T4C8Oall.doOALL(T4C8Oall.java:531)
at oracle.jdbc.driver.T4CStatement.doOall8(T4CStatement.java:195)
at oracle.jdbc.driver.T4CStatement.executeForRows(T4CStatement.java:1036)
at oracle.jdbc.driver.OracleStatement.doExecuteWithTimeout(OracleStatement.java:1336)
at oracle.jdbc.driver.OracleStatement.executeInternal(OracleStatement.java:1916)
at oracle.jdbc.driver.OracleStatement.execute(OracleStatement.java:1878)
at oracle.jdbc.driver.OracleStatementWrapper.execute(OracleStatementWrapper.java:318)
at com.jolbox.bonecp.StatementHandle.execute(StatementHandle.java:254)
at org.datanucleus.store.rdbms.table.AbstractTable.executeDdlStatement(AbstractTable.java:760)
at org.datanucleus.store.rdbms.table.AbstractTable.executeDdlStatementList(AbstractTable.java:711)
at org.datanucleus.store.rdbms.table.AbstractTable.create(AbstractTable.java:425)
at org.datanucleus.store.rdbms.table.AbstractTable.exists(AbstractTable.java:488)
at org.datanucleus.store.rdbms.RDBMSStoreManager$ClassAdder.performTablesValidation(RDBMSStoreManager.java:3380)
at org.datanucleus.store.rdbms.RDBMSStoreManager$ClassAdder.addClassTablesAndValidate(RDBMSStoreManager.java:3190)
at org.datanucleus.store.rdbms.RDBMSStoreManager$ClassAdder.run(RDBMSStoreManager.java:2841)
at org.datanucleus.store.rdbms.AbstractSchemaTransaction.execute(AbstractSchemaTransaction.java:122)
at org.datanucleus.store.rdbms.RDBMSStoreManager.addClasses(RDBMSStoreManager.java:1605)
at org.datanucleus.store.AbstractStoreManager.addClass(AbstractStoreManager.java:954)
at org.datanucleus.store.rdbms.RDBMSStoreManager.getDatastoreClass(RDBMSStoreManager.java:679)
at org.datanucleus.store.rdbms.RDBMSStoreManager.getPropertiesForGenerator(RDBMSStoreManager.java:2045)
at org.datanucleus.store.AbstractStoreManager.getStrategyValue(AbstractStoreManager.java:1365)
at org.datanucleus.ExecutionContextImpl.newObjectId(ExecutionContextImpl.java:3827)
at org.datanucleus.state.JDOStateManager.setIdentity(JDOStateManager.java:2571)
at org.datanucleus.state.JDOStateManager.initialiseForPersistentNew(JDOStateManager.java:513)
at org.datanucleus.state.ObjectProviderFactoryImpl.newForPersistentNew(ObjectProviderFactoryImpl.java:232)
at org.datanucleus.ExecutionContextImpl.newObjectProviderForPersistentNew(ExecutionContextImpl.java:1414)
at org.datanucleus.ExecutionContextImpl.persistObjectInternal(ExecutionContextImpl.java:2218)
at org.datanucleus.ExecutionContextImpl.persistObjectWork(ExecutionContextImpl.java:2065)
at org.datanucleus.ExecutionContextImpl.persistObject(ExecutionContextImpl.java:1913)
at org.datanucleus.ExecutionContextThreadedImpl.persistObject(ExecutionContextThreadedImpl.java:217)
at org.datanucleus.api.jdo.JDOPersistenceManager.jdoMakePersistent(JDOPersistenceManager.java:727)
at org.datanucleus.api.jdo.JDOPersistenceManager.makePersistent(JDOPersistenceManager.java:752)
at org.apache.hadoop.hive.metastore.ObjectStore.createTable(ObjectStore.java:814)
at sun.reflect.GeneratedMethodAccessor37.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:606)
at org.apache.hadoop.hive.metastore.RawStoreProxy.invoke(RawStoreProxy.java:114)
at com.sun.proxy.$Proxy0.createTable(Unknown Source)
at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.create_table_core(HiveMetaStore.java:1416)
at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.create_table_with_environment_context(HiveMetaStore.java:1449)
at sun.reflect.GeneratedMethodAccessor12.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:606)
at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invoke(RetryingHMSHandler.java:107)
at com.sun.proxy.$Proxy3.create_table_with_environment_context(Unknown Source)
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$create_table_with_environment_context.getResult(ThriftHiveMetastore.java:9200)
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$create_table_with_environment_context.getResult(ThriftHiveMetastore.java:9184)
at org.apache.thrift.ProcessFunction.process(ProcessFunction.java:39)
at org.apache.hadoop.hive.metastore.TUGIBasedProcessor$1.run(TUGIBasedProcessor.java:110)
at org.apache.hadoop.hive.metastore.TUGIBasedProcessor$1.run(TUGIBasedProcessor.java:106)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:415)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1657)
at org.apache.hadoop.hive.metastore.TUGIBasedProcessor.process(TUGIBasedProcessor.java:118)
at org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:285)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
)
The hive-site.xml configuration file is as follows:
<configuration>
<property>
<name>hive.metastore.warehouse.dir</name>
<value>hdfs://ktbigdata1:9000/hive</value>
<description>location of default database for the warehouse</description>
</property>
<property>
<name>hive.exec.scratchdir</name>
<value>hdfs://ktbigdata1:9000/hive/scratchdir</value>
<description>Scratch space for Hive jobs</description>
</property>
<property>
<name>hive.querylog.location</name>
<value>/home/hadoop/apache-hive-1.2.1-bin/logs</value>
<description>
Location of Hive run time structured log file
</description>
</property>
<property>
<name>javax.jdo.option.ConnectionURL</name> #oracle1
<value>jdbc:oracle:thin:@192.168.3.107:1521/ORCL</value>
<description>JDBC connect string for a JDBC metastore</description>
</property>
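<!-- Note on the URL format (an assumption about the Oracle thin driver, not stated in the
     original notes): this value uses the service-name form
       jdbc:oracle:thin:@host:1521/ORCL
     while the sqoop command earlier uses the SID form
       jdbc:oracle:thin:@host:1521:orcl
     either is accepted as long as it matches the listener configuration. -->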
<property>
<name>javax.jdo.option.ConnectionDriverName</name> #oracle2
<value>oracle.jdbc.OracleDriver</value>
<description>Driver class name for a JDBC metastore</description>
</property>
<property>
<name>javax.jdo.option.ConnectionUserName</name> #oracle3 the username in Oracle
<value>username</value>
<description>username to use against metastore database</description>
</property>
<property>
<name>javax.jdo.option.ConnectionPassword</name> #oracle4 the password in Oracle
<value>password</value>
<description>password to use against metastore database</description>
</property>
<property>
<name>hive.zookeeper.quorum</name>
<value>192.168.100.141,192.168.100.142,192.168.100.143</value>
<description>
List of ZooKeeper servers to talk to. This is needed for:
1. Read/write locks - when hive.lock.manager is set to
org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager,
2. When HiveServer2 supports service discovery via Zookeeper.
3. For delegation token storage if zookeeper store is used, if
hive.cluster.delegation.token.store.zookeeper.connectString is not set
</description>
</property>
## hive.aux.jars.path is optional and can be omitted
<property>
<name>hive.aux.jars.path</name>
<value>file:///home/hadoop/apache-hive-1.2.1-bin/lib/hive-hbase-handler*.jar,
file:///home/hadoop/apache-hive-1.2.1-bin/lib/protobuf-java-*.jar,
file:///home/hadoop/apache-hive-1.2.1-bin/lib/hbase-client*.jar,
file:///home/hadoop/apache-hive-1.2.1-bin/lib/hbase-common-*hadoop2.jar,
file:///home/hadoop/apache-hive-1.2.1-bin/lib/zookeeper-*.jar,
file:///home/hadoop/apache-hive-1.2.1-bin/lib/guava-*.jar,
file:///home/hadoop/apache-hive-1.2.1-bin/lib/mongo-hadoop-hive-1.4-rc0.jar</value>
</property>
# The client must add the following configuration
<property>
<name>hive.metastore.uris</name>
<value>thrift://ktbigdata1:9083</value>
</property>
</configuration>
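For reference (not in the original notes): with hive.metastore.uris set as above, the metastore service is expected to be running on ktbigdata1 before clients connect; it is typically started with something like
$ hive --service metastore &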