1) Implementing file merging and deduplication
Given two input files, A and B, write a MapReduce program that merges the two files, removes the duplicate content, and produces a new output file C.
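For example, with the following made-up file contents (sample data for illustration only; the actual files may differ):

File A:
20150101 x
20150102 y
20150103 x
20150104 y

File B:
20150101 y
20150102 y
20150103 x
20150104 z

the merged, deduplicated file C should contain (the lines also come out sorted, because each line is used as a key):

20150101 x
20150101 y
20150102 y
20150103 x
20150104 y
20150104 z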
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.*;
import org.apache.hadoop.mapreduce.lib.output.*;
/*
 * Approach:
 * The goal of deduplication is that any line appearing more than once in the input
 * shows up exactly once in the output.
 * Emit each line as the key, so identical lines from different files map to the same key;
 * in the Reducer, whatever the value-list looks like, simply write out the key once.
 */
public class Merge {
public static class Map extends Mapper<Object, Text, Text, Text> {// input key, input value, output key, output value
public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
// Emit the whole line as the key; the value carries no information.
context.write(value, new Text(""));
}
}
public static class Reduce extends Reducer<Text, Text, Text, Text> {
public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
context.write(key, new Text(""));// identical lines share one key, so writing each key once removes duplicates
}
}
public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
Configuration conf = new Configuration();
conf.set("fs.defaultFS", "hdfs://localhost:9000");
// Input and output paths are hardcoded here; adjust them to match your HDFS layout.
String[] otherArgs = new String[] { "hdfs://localhost:9000/MapReduceTest/Merge/input",
"hdfs://localhost:9000/MapReduceTest/Merge/output" };
if (otherArgs.length != 2) {
System.err.println("Usage:Merge and duplicate removal <in> <out>");
System.exit(2);
}
Job job = Job.getInstance(conf, "Merge and duplicate removal");
job.setJarByClass(Merge.class);
job.setMapperClass(Map.class);
job.setReducerClass(Reduce.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
System.exit(job.waitForCompletion(true) ? 0 : 1);
}
}
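A minimal sketch of how the job might be packaged and run, assuming Hadoop is running in pseudo-distributed mode at hdfs://localhost:9000 and the class has been compiled into a jar named Merge.jar (the jar name is an assumption for illustration; the HDFS paths match the ones hardcoded in main()). The output directory must not exist beforehand, and since the paths are hardcoded, no command-line arguments are needed:

hdfs dfs -rm -r /MapReduceTest/Merge/output
hadoop jar Merge.jar Merge
hdfs dfs -cat /MapReduceTest/Merge/output/part-r-00000

The last command prints the deduplicated result from the reducer's default output file.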
2) Sorting the integers in the input files
There are several input files, and every line in each file contains a single integer. Write a MapReduce program that reads the integers from all files, sorts them in ascending order, and writes them to a new file. Each output line contains two integers: the first is the rank of the second integer in the sorted order, and the second is the original integer.
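For instance, if the made-up contents of the input files are the integers 33, 37, 12 and 40 spread across several files (sample data for illustration only), the expected output is:

1 12
2 33
3 37
4 40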
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
/*
 * Approach:
 * MapReduce sorts records by key during the shuffle;
 * when the key is an IntWritable (a wrapped int), keys are compared numerically.
 * So the Mapper parses each line into an IntWritable and emits it as the key; the value can be anything.
 * The Reducer writes its input key as the output value, once for every element in the value-list
 * (so duplicates are preserved), using a running rank counter as the output key.
 */
public class Sort {
public static class Map extends Mapper<Object, Text, IntWritable, IntWritable> {
private static IntWritable data = new IntWritable();
public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
String line = value.toString();
data.set(Integer.parseInt(line));
context.write(data, new IntWritable(1));
}
}
public static class Reduce extends Reducer<IntWritable, IntWritable, IntWritable, IntWritable> {
private static IntWritable number = new IntWritable(1);// running rank counter; correct because the job uses a single reducer (the Hadoop default)
public void reduce(IntWritable key, Iterable<IntWritable> values, Context context)
throws IOException, InterruptedException {
for (IntWritable num : values) {// emit the key once per occurrence so duplicate integers each receive a rank
context.write(number, key);
number = new IntWritable(number.get() + 1);
}
}
}
public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
Configuration conf = new Configuration();
conf.set("fs.defaultFS", "hdfs://localhost:9000");
String[] otherArgs = new String[] { "hdfs://localhost:9000/MapReduceTest/Sort/input",
"hdfs://localhost:9000/MapReduceTest/Sort/output" };
if (otherArgs.length != 2) {
System.err.println("Usage:Merge and duplicate removal <in> <out>");
System.exit(2);
}
Job job = Job.getInstance(conf, "Merge and sort");
job.setJarByClass(Sort.class);
job.setMapperClass(Map.class);
job.setReducerClass(Reduce.class);
job.setOutputKeyClass(IntWritable.class);
job.setOutputValueClass(IntWritable.class);
FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
System.exit(job.waitForCompletion(true) ? 0 : 1);
}
}
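One caveat: the running rank counter in the Reducer yields a correct global ordering only when the whole job runs a single reduce task. Hadoop defaults to one reducer, so the code above works as written; if the reducer count were ever raised, it could be pinned back explicitly with the one-liner below (a precautionary sketch, not something the original code needs):

job.setNumReduceTasks(1);// keep one reducer so the shuffle produces a single, globally sorted sequence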
3) Mining information from a given table
Given a child-parent table, write a MapReduce program that mines the parent-child relationships it contains and produces a table of grandchild-grandparent relationships.
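As a made-up illustration (all names are invented for this example), an input table such as

child parent
Tom Lucy
Tom Jack
Lucy Mary
Lucy Ben
Jack Alice

should produce the grandchild-grandparent table

grandchild grandparent
Tom Alice
Tom Mary
Tom Ben

since Tom's parents are Lucy and Jack, whose own parents are Alice, Mary and Ben.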
import java.io.IOException;
import java.util.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
/*
 * Approach (a reduce-side self-join):
 * Use each person's name as the key and emit two kinds of tagged values,
 * one keyed by the child column and one keyed by the parent column.
 * For a given key person, the children gathered from parent-column records and the parents
 * gathered from child-column records pair up to form grandchild-grandparent relations.
 */
public class Relation {
public static class Map extends Mapper<Object, Text, Text, Text> {
public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
String child_name = new String();
String parent_name = new String();
String line = value.toString();
int i = 0;
while (line.charAt(i) != ' ') {
i++;
}
String[] values = { line.substring(0, i), line.substring(i + 1) };// split the line into the two names
if (values[0].compareTo("child") != 0) {// skip the header line "child parent"
child_name = values[0];
parent_name = values[1];
context.write(new Text(values[0]), new Text("1-" + child_name + "-" + parent_name));// keyed by the child, tag 1
context.write(new Text(values[1]), new Text("2-" + child_name + "-" + parent_name));// keyed by the parent, tag 2
}
}
}
public static int head = 0;// flag so the "grandchild grandparent" header is written only once (relies on a single reducer)
public static class Reduce extends Reducer<Text, Text, Text, Text> {
public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
if (head == 0) {
context.write(new Text("grandchild"), new Text("grandparent"));
head = 1;
}
int grandchild_num = 0;
String grandchild[] = new String[32];// children of the key person (a capacity of 32 is assumed sufficient)
int grandparent_num = 0;
String grandparent[] = new String[32];// parents of the key person (a capacity of 32 is assumed sufficient)
Iterator<Text> ite = values.iterator();
while (ite.hasNext()) {
String record = ite.next().toString();// record format: "<tag>-<child>-<parent>"
int len = record.length();
char relation_type = record.charAt(0);
String child_name = new String();
String parent_name = new String();
int i = 2;// skip the tag and the first '-'
while (record.charAt(i) != '-') {
child_name = child_name + record.charAt(i);
i++;
}
i++;
while (i < len) {
parent_name = parent_name + record.charAt(i);
i++;
}
if (relation_type == '1') {// the key person is the child in this record, so parent_name is one of their parents
grandparent[grandparent_num] = parent_name;
grandparent_num++;
} else {// the key person is the parent in this record, so child_name is one of their children
grandchild[grandchild_num] = child_name;
grandchild_num++;
}
}
for (int m = 0; m < grandchild_num; m++) {// Cartesian product: every child paired with every parent of the key person
for (int n = 0; n < grandparent_num; n++) {
context.write(new Text(grandchild[m]), new Text(grandparent[n]));
}
}
}
}
public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
Configuration conf = new Configuration();
conf.set("fs.defaultFS", "hdfs://localhost:9000");
String[] otherArgs = new String[] { "hdfs://localhost:9000/MapReduceTest/Relation/input",
"hdfs://localhost:9000/MapReduceTest/Relation/output" };
if (otherArgs.length != 2) {
System.err.println("Usage:Relation <in> <out>");
System.exit(2);
}
Job job = Job.getInstance(conf, "Relation");
job.setJarByClass(Relation.class);
job.setMapperClass(Map.class);
job.setReducerClass(Reduce.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
System.exit(job.waitForCompletion(true) ? 0 : 1);
}
}
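To make the reduce-side join concrete, consider the key Lucy from the hypothetical table above: the Reducer receives the values "2-Tom-Lucy" (Tom is Lucy's child) together with "1-Lucy-Mary" and "1-Lucy-Ben" (Mary and Ben are Lucy's parents), so grandchild[] collects Tom while grandparent[] collects Mary and Ben, and the nested loops emit the pairs (Tom, Mary) and (Tom, Ben).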