Big Data Techniques: Inverted Index (Multi-Job Chaining) Explained

7.8 Inverted Index (Chaining Multiple Jobs)

0) Requirement: there is a large volume of text (documents, web pages) over which a search index must be built. The build runs as two chained MapReduce jobs: the first counts how many times each word occurs in each file, and the second regroups those per-file counts under each word. The sample input is three small files:


a.txt:

xyg pingping
xyg ss
xyg ss


b.txt:

xyg pingping
xyg pingping
pingping ss


c.txt:

xyg ss
xyg pingping


1) Expected output of the first job

xyg--a.txt 3
xyg--b.txt 2
xyg--c.txt 2
pingping--a.txt 1
pingping--b.txt 3
pingping--c.txt 1
ss--a.txt 2
ss--b.txt 1
ss--c.txt 1

2) Expected output of the second job (the final inverted index)

xyg c.txt-->2 b.txt-->2 a.txt-->3 
pingping c.txt-->1 b.txt-->3 a.txt-->1 
ss c.txt-->1 b.txt-->1 a.txt-->2
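
Before turning to the Hadoop code, the two-pass logic can be traced in plain Java over in-memory data. The sketch below is only a sanity check, not part of the MapReduce implementation; the class name IndexTrace is made up, and the "--" and "-->" separators mirror the expected outputs above. (Hadoop additionally sorts keys during the shuffle, so the real jobs order their output differently.)

import java.util.LinkedHashMap;
import java.util.Map;

public class IndexTrace {
    public static void main(String[] args) {
        // Sample data: each row is a file name followed by its lines
        String[][] docs = {
                { "a.txt", "xyg pingping", "xyg ss", "xyg ss" },
                { "b.txt", "xyg pingping", "xyg pingping", "pingping ss" },
                { "c.txt", "xyg ss", "xyg pingping" },
        };

        // Pass 1: count occurrences of each "word--file" pair
        Map<String, Integer> pass1 = new LinkedHashMap<>();
        for (String[] doc : docs) {
            for (int i = 1; i < doc.length; i++) {
                for (String word : doc[i].split(" ")) {
                    pass1.merge(word + "--" + doc[0], 1, Integer::sum);
                }
            }
        }

        // Pass 2: regroup the per-file counts under each word
        Map<String, StringBuilder> pass2 = new LinkedHashMap<>();
        for (Map.Entry<String, Integer> e : pass1.entrySet()) {
            String[] fields = e.getKey().split("--");   // [word, file]
            pass2.computeIfAbsent(fields[0], w -> new StringBuilder())
                    .append(fields[1]).append("-->").append(e.getValue()).append('\t');
        }

        // Prints one postings line per word, e.g. "xyg  a.txt-->3  b.txt-->2  c.txt-->2"
        pass2.forEach((word, postings) ->
                System.out.println(word + "\t" + postings.toString().trim()));
    }
}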

1) The first job

(1) First job: write OneIndexMapper

package com.xyg.mapreduce.index;

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

public class OneIndexMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    Text k = new Text();

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // 1. Get the name of the split, i.e. the source file
        FileSplit inputSplit = (FileSplit) context.getInputSplit();
        String name = inputSplit.getPath().getName();

        // 2. Read one line
        String line = value.toString();

        // 3. Split it into words
        String[] words = line.split(" ");

        // 4. Tie each word to the file it came from
        for (String word : words) {
            k.set(word + "--" + name);
            context.write(k, new IntWritable(1));
        }
    }
}
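
For a concrete feel of the key format, the snippet below (a standalone demo with a made-up class name, not part of the job) shows what the mapper emits for the first line of a.txt:

public class MapDemo {
    public static void main(String[] args) {
        String name = "a.txt";          // the split (source-file) name
        String line = "xyg pingping";   // the first line of a.txt
        for (String word : line.split(" ")) {
            // Mirrors context.write(new Text(word + "--" + name), new IntWritable(1))
            System.out.println(word + "--" + name + "\t1");
        }
        // Prints:
        // xyg--a.txt    1
        // pingping--a.txt    1
    }
}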

(2) First job: write OneIndexReducer

package com.xyg.mapreduce.index;

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class OneIndexReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        // Sum the occurrences of this word--file pair
        int count = 0;
        for (IntWritable value : values) {
            count += value.get();
        }

        // Emit, e.g., "xyg--a.txt  3"
        context.write(key, new IntWritable(count));
    }
}
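
Because this reduce is a pure sum, which is associative and commutative, the same class could also be registered as a combiner so partial sums are computed map-side and less data crosses the shuffle. This is an optional tweak not present in the original post; it would be one extra line in the driver shown next:

// Optional: reuse the reducer as a combiner (safe because the reduce is a pure sum)
job.setCombinerClass(OneIndexReducer.class);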

(3) First job: write OneIndexDriver

package com.xyg.mapreduce.index;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class OneIndexDriver {

    public static void main(String[] args) throws Exception {
        // Hard-coded local paths for testing; these override any command-line arguments
        args = new String[] { "e:/inputoneindex", "e:/output5" };

        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        job.setJarByClass(OneIndexDriver.class);
        job.setMapperClass(OneIndexMapper.class);
        job.setReducerClass(OneIndexReducer.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        job.waitForCompletion(true);
    }
}
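
One practical note when re-running this locally: FileOutputFormat refuses to write into an existing directory and the job fails with a FileAlreadyExistsException. A hedged sketch of a pre-submit cleanup, not part of the original driver, would be:

// Delete the output directory if it already exists (requires org.apache.hadoop.fs.FileSystem)
FileSystem fs = FileSystem.get(conf);
Path outPath = new Path(args[1]);
if (fs.exists(outPath)) {
    fs.delete(outPath, true);   // true = recursive delete
}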

(4) Inspect the first job's output

xyg--a.txt    3 
xyg--b.txt    2 
xyg--c.txt    2 
pingping--a.txt    1 
pingping--b.txt    3 
pingping--c.txt    1 
ss--a.txt    2 
ss--b.txt    1 
ss--c.txt    1

2) The second job (its input is the first job's output)

(1) Second job: write TwoIndexMapper

package com.xyg.mapreduce.index;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class TwoIndexMapper extends Mapper<LongWritable, Text, Text, Text> {

    Text k = new Text();
    Text v = new Text();

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // 1. Read one line of first-pass output, e.g. "xyg--a.txt\t3"
        String line = value.toString();

        // 2. Split on "--": fields[0] is the word, fields[1] is "filename\tcount"
        String[] fields = line.split("--");

        k.set(fields[0]);
        v.set(fields[1]);

        // 3. Emit word -> "filename\tcount"
        context.write(k, v);
    }
}

(2) Second job: write TwoIndexReducer

package com.xyg.mapreduce.index;

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class TwoIndexReducer extends Reducer<Text, Text, Text, Text> {

    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        // Input for key "xyg": "a.txt\t3", "b.txt\t2", "c.txt\t2"
        // Desired output:      xyg  c.txt-->2  b.txt-->2  a.txt-->3
        StringBuilder sb = new StringBuilder();
        for (Text value : values) {
            // Turn the tab between filename and count into "-->", then tab-separate the entries
            sb.append(value.toString().replace("\t", "-->")).append("\t");
        }
        context.write(key, new Text(sb.toString()));
    }
}
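
The reducer depends on the fact that the first job's TextOutputFormat separates key and value with a tab, so each incoming value looks like "a.txt\t3". A quick standalone check of the string handling (plain Java, made-up class name):

public class SeparatorDemo {
    public static void main(String[] args) {
        String line = "xyg--a.txt\t3";                        // one line of first-pass output
        String[] fields = line.split("--");                   // ["xyg", "a.txt\t3"]
        System.out.println(fields[0]);                        // xyg
        System.out.println(fields[1].replace("\t", "-->"));   // a.txt-->3
    }
}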

(3) Second job: write TwoIndexDriver

package com.xyg.mapreduce.index;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class TwoIndexDriver {

    public static void main(String[] args) throws Exception {
        // Hard-coded local paths for testing; the input directory holds the first job's output
        args = new String[] { "e:/inputtwoindex", "e:/output6" };

        Configuration config = new Configuration();
        Job job = Job.getInstance(config);

        job.setJarByClass(TwoIndexDriver.class);
        job.setMapperClass(TwoIndexMapper.class);
        job.setReducerClass(TwoIndexReducer.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
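
The two drivers above are submitted separately by hand. Since the point of this section is chaining multiple jobs, the same flow can be expressed as a single driver that submits the second job only after the first one succeeds, with the first job's output directory fed in as the second job's input. The sketch below is one straightforward way to do this; the class name IndexChainDriver and the paths are illustrative, not from the original post:

package com.xyg.mapreduce.index;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class IndexChainDriver {

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path input  = new Path("e:/inputoneindex");   // raw text files
        Path temp   = new Path("e:/output5");         // first job's output, second job's input
        Path output = new Path("e:/output6");         // final inverted index

        // Job 1: count each "word--file" pair
        Job job1 = Job.getInstance(conf, "one-index");
        job1.setJarByClass(IndexChainDriver.class);
        job1.setMapperClass(OneIndexMapper.class);
        job1.setReducerClass(OneIndexReducer.class);
        job1.setMapOutputKeyClass(Text.class);
        job1.setMapOutputValueClass(IntWritable.class);
        job1.setOutputKeyClass(Text.class);
        job1.setOutputValueClass(IntWritable.class);
        FileInputFormat.setInputPaths(job1, input);
        FileOutputFormat.setOutputPath(job1, temp);

        // Submit job 2 only if job 1 succeeded
        if (!job1.waitForCompletion(true)) {
            System.exit(1);
        }

        // Job 2: regroup the counts under each word
        Job job2 = Job.getInstance(conf, "two-index");
        job2.setJarByClass(IndexChainDriver.class);
        job2.setMapperClass(TwoIndexMapper.class);
        job2.setReducerClass(TwoIndexReducer.class);
        job2.setMapOutputKeyClass(Text.class);
        job2.setMapOutputValueClass(Text.class);
        job2.setOutputKeyClass(Text.class);
        job2.setOutputValueClass(Text.class);
        FileInputFormat.setInputPaths(job2, temp);
        FileOutputFormat.setOutputPath(job2, output);

        System.exit(job2.waitForCompletion(true) ? 0 : 1);
    }
}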

(4) Inspect the final result of the second job

xyg     c.txt-->2    b.txt-->2    a.txt-->3     
pingping    c.txt-->1    b.txt-->3    a.txt-->1     
ss    c.txt-->1    b.txt-->1    a.txt-->2    
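
The last table is the inverted index itself: one line per term, listing every file that contains it together with its frequency, so a search only needs a key lookup. A toy illustration of that lookup (plain Java, made-up class name, with the index hard-coded from the output above):

import java.util.LinkedHashMap;
import java.util.Map;

public class IndexLookup {
    public static void main(String[] args) {
        // word -> postings list, as produced by the second job
        Map<String, String> index = new LinkedHashMap<>();
        index.put("xyg", "c.txt-->2\tb.txt-->2\ta.txt-->3");
        index.put("pingping", "c.txt-->1\tb.txt-->3\ta.txt-->1");
        index.put("ss", "c.txt-->1\tb.txt-->1\ta.txt-->2");

        // Searching for "pingping" returns the files containing it, with counts
        System.out.println(index.get("pingping"));
    }
}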

 

Original article by Maggie-Hunter. If you repost it, please credit the source: https://blog.ytso.com/9443.html
