1.准备本地txt文件
# step 1: create two small sample text files for the word-count demo
printf '%s\n' "hadoop hbase" > f1.txt
printf '%s\n' "hadoop hive" > f2.txt
2.启动hadoop,启动hive
3.创建数据库,创建文本表
-- step 3: create the database if missing, switch to it, then create the
-- one-column text table that will hold the raw lines; verify with show tables.
-- (original line fused the prose note into the command and misspelled "tables")
create database if not exists hive;
use hive;
create table if not exists wctext(line string);
show tables;
4.映射本地文件的数据到文本表中
-- step 4: load both local sample files into wctext (appends; no overwrite)
load data local inpath '/home/hadoop/wc/f1.txt' into table wctext;
load data local inpath '/home/hadoop/wc/f2.txt' into table wctext;
5.hql语句进行词频统计并将结果保存到结果表中。
-- step 5: split each line on spaces, explode into one word per row,
-- then count occurrences per word, ordered alphabetically.
select word, count(1) as count
from (select explode(split(line, ' ')) as word from wctext) w
group by word
order by word;

-- persist the same aggregation into the result table wc
create table wc as
select word, count(1) as count
from (select explode(split(line, ' ')) as word from wctext) w
group by word
order by word;
6.查看统计结果
1.准备电子书或其它大的文本文件
2.将文本文件上传到HDFS
# upload the local text file into the HDFS input directory for this job
hdfs dfs -put story.txt input/wcHive/
3.创建文本表
-- step 3: one-column text table for the raw document lines.
-- "if not exists" keeps the step re-runnable, matching the wctext creation.
create table if not exists docs(line string);
4.映射HDFS中的文件数据到文本表中
-- step 4: docs was already created in step 3; repeating "create table docs"
-- here would fail with "table already exists", so only the load is needed.
-- "overwrite" replaces any previous contents of the table.
load data inpath '/user/hadoop/input/wcHive/story.txt' overwrite into table docs;
5.hql语句进行词频统计并将结果保存到结果表中
-- step 5: word-frequency CTAS. The subquery alias is renamed from "word"
-- to "w" so it no longer shadows the exploded column of the same name,
-- and matches the alias style of the wc query earlier in this file.
create table word_count as
select word, count(1) as count
from (select explode(split(line, ' ')) as word from docs) w
group by word
order by word;
6.查看统计结果
-- step 6: confirm the result table exists, then inspect its contents
show tables;
select * from word_count;
版权声明:本文内容由互联网用户自发贡献,该文观点与技术仅代表作者本人。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如发现本站有涉嫌侵权/违法违规的内容, 请发送邮件至 [email protected] 举报,一经查实,本站将立刻删除。