Basic Hive Operations

Creating a Database

create database [if not exists] db_hive;
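A fuller form can pin down a comment and the HDFS location explicitly (a sketch; the path is an assumption):

create database if not exists db_hive
comment 'database for the examples below'
location '/user/root/hive/warehouse/db_hive.db';  -- hypothetical location

show databases;
use db_hive;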

Table Operations

-- Create a table
create table [if not exists] student(
    name string,
    age int,
    score int)
row format delimited fields terminated by '\t';

-- Create a derived table holding only the columns you need (CTAS)
create table if not exists db_hive.emp_cats
as
select name, age from student;

-- Remove all rows but keep the table itself
truncate table dept_cats;

-- Create a table with the same schema as an existing one
create table if not exists student_like
like
test.student;

-- Rename a table
alter table dept_like rename to dept_like_rename;

-- Drop a table (this occasionally seems to hang)
drop table if exists dept_like_rename;

-- Create an external table
-- Dropping an external table removes only the metadata, not the underlying data
create external table if not exists db_hive.emp_external(
empno int,
ename string,
job string,
mgr int,
hiredate string,
sal double,
comm double,
deptno int
)
row format delimited fields terminated by '\t'
location '/user/root/hive/warehouse/emp_external';
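A quick way to see that property (a sketch): drop the table, and the files stay put.

drop table if exists db_hive.emp_external;
-- the files under /user/root/hive/warehouse/emp_external remain on HDFS,
-- so re-running the CREATE EXTERNAL TABLE above restores the table with its data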

-- Create a table with two levels of partitions
set hive.exec.dynamic.partition.mode=nonstrict; -- allow dynamic partitions
-- Static partitions
create external table if not exists db_hive.emp_partition2(
empno int,
ename string,
job string,
mgr int,
hiredate string,
sal double,
comm double,
deptno int
)
PARTITIONED BY (month string, day string)
row format delimited fields terminated by '\t'
location '/user/root/hive/warehouse/emp_external';

load data local inpath '/root/data_emp/emp.txt' into table db_hive.emp_partition2
partition (month='201810', day='21');

SELECT * FROM emp_partition2 where month='201810' and day='21';
-- Dynamic partitions (the partition columns must come last in the SELECT)
FROM raw_access_log
INSERT OVERWRITE TABLE partitioned_raw_access_log
PARTITION (year, month, day)
SELECT ip, ..., year, month, day;
-- Mixed mode (some partition values static, the rest dynamic)
FROM raw_access_log
INSERT OVERWRITE TABLE partitioned_raw_access_log
PARTITION (year='2007', month, day)
SELECT ip, ..., month, day;
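A self-contained version of the dynamic-partition pattern (ip and url are hypothetical columns of raw_access_log):

set hive.exec.dynamic.partition=true;           -- enable dynamic partitioning
set hive.exec.dynamic.partition.mode=nonstrict; -- no static partition required
from raw_access_log
insert overwrite table partitioned_raw_access_log
partition (year, month, day)                    -- all three filled dynamically
select ip, url, year, month, day;               -- partition columns come last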

-- Bucketing
-- CLUSTERED BY belongs in the CREATE TABLE statement; unlike partition
-- columns, the bucket columns must be columns that already exist in the table
create table table_test (...)
clustered by (column_name, ...) into 8 buckets;
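One payoff of bucketing is cheap sampling (a sketch against the hypothetical table above):

-- read only the first of the 8 buckets instead of scanning the whole table
select * from table_test tablesample (bucket 1 out of 8 on column_name);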

-- Load data into a bucketed table
-- First set the number of reducers equal to the number of buckets,
-- since that determines how many files are produced.
-- With hive.enforce.sorting and hive.enforce.bucketing set to true,
-- explicit DISTRIBUTE BY / SORT BY clauses are unnecessary.
SET hive.enforce.bucketing=true;
SET mapred.reduce.tasks=200;
FROM table_fa
INSERT OVERWRITE TABLE table_son
SELECT ...,request_date
WHERE ...
[DISTRIBUTE BY ...][SORT BY ...]

-- Repair partitions (sync the metastore with partition directories already on HDFS)
MSCK REPAIR TABLE table_name;
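MSCK scans the table's whole location; for a single directory you already know about, registering it explicitly is cheaper (a sketch; day='22' is a hypothetical new partition):

alter table emp_partition2 add if not exists partition (month='201810', day='22');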

The Four BY Clauses

-- order by: global sort, uses only a single reducer
	select * from emp order by empno desc;
-- sort by: sorts within each reducer; the overall result set is not sorted
	set mapreduce.job.reduces=3;
	select * from emp sort by empno asc;
	insert overwrite local directory '/opt/datas/sortby_res' select * from emp sort by empno asc;
-- distribute by: partitions the data, like the partitioner in MapReduce;
-- usually combined with sort by
	insert overwrite local directory '/opt/datas/distby_res' select * from emp
	distribute by deptno sort by empno asc;
-- cluster by: shorthand for distribute by and sort by on the same column
	insert overwrite local directory '/opt/datas/clusterby_res' select * from emp
	cluster by empno;

Getting Table Information

-- Basic information
desc student;
-- Detailed information
desc formatted student;
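A few related introspection commands (standard HiveQL):

show tables;                    -- tables in the current database
show partitions emp_partition2; -- partitions loaded so far
show create table student;      -- the exact DDL Hive stores for the table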

Importing Data

Loading data from files
-- Load data from the local filesystem into a table
load data local inpath '/data/student.txt' into table db_hive.student;
-- Load data from HDFS into a table
load data inpath '/student.txt' into table db_hive.student;
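Both LOAD statements above append to the table; adding OVERWRITE replaces its current contents instead (and note that loading from HDFS moves the source file rather than copying it):

-- replace the table's existing data with the file's contents
load data local inpath '/data/student.txt' overwrite into table db_hive.student;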
Inserting filtered rows from another table
hive> insert into table test
    > partition (age='25')
    > select id, name, tel
    > from wyp;
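From a single scan of the source table, Hive can also populate several targets at once with a multi-insert (a sketch; test2 is a hypothetical second target table):

hive> from wyp
    > insert into table test partition (age='25') select id, name, tel
    > insert into table test2 select id, name;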

Exporting Data

-- 1. Common export
insert overwrite local directory '/opt/datas/test.txt' select * from emp;
-- 2. Export with an explicit format
insert overwrite local directory '/opt/datas/test1.txt'
ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' COLLECTION ITEMS TERMINATED BY '\n'
select * from emp;
-- 3. Via the CLI: hive -e "select * from ..." > /opt/datas/xx.txt
-- 4. Export to HDFS
insert overwrite directory ''
ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' COLLECTION ITEMS TERMINATED BY '\n'
select * from emp_partition;
-- 5. Via Sqoop
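Hive also ships an EXPORT/IMPORT pair that copies a table's data together with its metadata (a sketch; the backup path and the imported table name are assumptions):

export table student to '/user/root/backup/student';   -- hypothetical path
-- and the matching import, e.g. on another cluster or into another database:
import table student_imported from '/user/root/backup/student';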
Licensed by the author under CC BY 4.0.