建表
-- Create a plain (non-partitioned) table stored as delimited text.
-- fix: the original used '/t' and '/n' (a literal slash followed by a letter);
-- Hive delimiters need the escape sequences '\t' (tab) and '\n' (newline).
-- NOTE(review): column name "addressi" looks like a typo for "address" — kept
-- as-is to avoid breaking anything that references it; confirm with the author.
CREATE TABLE mydb.userinfo (name STRING, addressi STRING)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY '\t'
LINES TERMINATED BY '\n'
STORED AS TEXTFILE;
创建分区表
-- Create a partitioned table.
-- fix: removed the stray trailing commas after the column list and after the
-- PARTITIONED BY clause — both are syntax errors in HiveQL.
-- NOTE(review): this reuses the table name mydb.userinfo from the earlier
-- example; running both snippets as-is will fail unless the first is dropped.
-- The partition column (datekey) must NOT also appear in the regular column
-- list — Hive exposes it as a virtual column backed by the directory layout.
CREATE TABLE mydb.userinfo
(col1 STRING, col2 DATE, col3 DOUBLE)
PARTITIONED BY (datekey DATE)   -- a composite partition may list several columns here
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ','
STORED AS TEXTFILE;
将本地文件数据导入到表mydb.userinfo中
-- Bulk-load a file from the local filesystem into the table;
-- OVERWRITE replaces any data already in the table.
LOAD DATA LOCAL INPATH "/home/dahaizi/data/userinfo.txt"
OVERWRITE INTO TABLE mydb.userinfo;
向表中插入数据
insert into table(col1,col2,col3) values(‘a’,’b’,’c’)
将查询的数据插入到已有的表中
-- Insert query results into an existing partitioned table using dynamic
-- partitioning (requires hive.exec.dynamic.partition=true — see the SET
-- statements elsewhere in these notes).
-- fix: removed the stray commas after the PARTITION clause and after the IN
-- list, replaced curly quotes in the IN list with ASCII single quotes, and
-- added the terminating semicolon.
INSERT INTO TABLE table_Name
PARTITION (DateKey)
SELECT col1, col2, col3, DateKey
FROM otherTable
WHERE DateKey IN ('2017-02-26', '2013-06-12', '2013-09-24')
GROUP BY col1, col2, col3, DateKey
DISTRIBUTE BY DateKey;   -- route each partition's rows to the same reducer
将查询的数据存储的hdfs目录中
-- Export query results to an HDFS directory as comma-delimited text files.
-- OVERWRITE replaces the directory's existing contents.
INSERT OVERWRITE DIRECTORY '/jc_bdcqs/qsy'
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ','
SELECT * FROM zqs_gs_g60_0730_list;
!quit
HQL查询常用设置项
1)设置计算容错率(防止因计算过程出错而异常退出程序):set mapred.max.map.failures.percent=100;
2)限制查询输出文件的个数：set mapred.reduce.tasks=1;
3) 控制最大reduce的数量，不会影响mapred.reduce.tasks的设置：set hive.exec.reducers.max = 100;
4) 设置每个reducer处理的数据量（据此决定一个job会有多少个reducer），默认为1G：set hive.exec.reducers.bytes.per.reducer = 1000000000;
设置动态分区
-- Enable dynamic partitioning for INSERT ... PARTITION (col) statements.
-- fix: the original lines mixed Chinese parenthetical notes and trailing
-- commas directly into the statements, making them unparseable; the notes
-- are preserved here as comments.
-- (Inspect the current value with:  SET hive.exec.dynamic.partition;)
SET hive.exec.dynamic.partition=true;
-- nonstrict: allow ALL partition columns to be dynamic (strict requires at
-- least one static partition column).
SET hive.exec.dynamic.partition.mode=nonstrict;
-- The job fails if the number of auto-created partitions exceeds this limit.
SET hive.exec.max.dynamic.partitions=100000;
SET hive.exec.max.dynamic.partitions.pernode=100000;
删除表
-- Remove the table entirely (definition and, for managed tables, its data).
DROP TABLE tb_name;
-- Alternatively, keep the table definition but delete all rows:
-- (fix: the original fused the prose "或清空表" onto the statement line)
TRUNCATE TABLE table_name;
删除分区
ALTER TABLE table_Name DROP PARTITION (Datekey='20190606');
新增分区
alter table tb_name add partition (Datekey = ‘20190606’);
原创文章,作者:奋斗,如若转载,请注明出处:https://blog.ytso.com/194643.html